[llvm] d6b0734 - [NFC] Use Register instead of unsigned
Jim Lin via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 19 04:31:00 PST 2022
Author: Jim Lin
Date: 2022-01-19T20:17:04+08:00
New Revision: d6b07348371247a2dff4635640af7be110bc7ee6
URL: https://github.com/llvm/llvm-project/commit/d6b07348371247a2dff4635640af7be110bc7ee6
DIFF: https://github.com/llvm/llvm-project/commit/d6b07348371247a2dff4635640af7be110bc7ee6.diff
LOG: [NFC] Use Register instead of unsigned
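For context: this is a mechanical cleanup. MachineOperand::getReg() already
returns llvm::Register, which converts implicitly to and from unsigned, so
storing the result in a Register is a drop-in change that also gains the
class's type-safety helpers. A minimal sketch of the pattern being applied
(illustrative only, not code from this patch):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/Register.h"

  void example(llvm::MachineInstr &MI) {
    // Before: unsigned DstReg = MI.getOperand(0).getReg();
    llvm::Register DstReg = MI.getOperand(0).getReg();
    if (DstReg.isVirtual()) {
      // Register::isVirtual()/isPhysical() replace the static
      // Register::isVirtualRegister(unsigned) checks that code holding a
      // raw unsigned had to call.
    }
  }

Because Register still converts back to unsigned, comparisons such as
"if (Reg == 0)" and calls taking unsigned in the hunks below keep working
unchanged.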
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
llvm/lib/Target/AArch64/AArch64FastISel.cpp
llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
llvm/lib/Target/ARM/ARMFastISel.cpp
llvm/lib/Target/ARM/ARMFrameLowering.cpp
llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
llvm/lib/Target/AVR/AVRFrameLowering.cpp
llvm/lib/Target/AVR/AVRISelLowering.cpp
llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
llvm/lib/Target/Lanai/LanaiISelLowering.cpp
llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
llvm/lib/Target/Mips/Mips16FrameLowering.cpp
llvm/lib/Target/Mips/Mips16InstrInfo.cpp
llvm/lib/Target/Mips/MipsAsmPrinter.cpp
llvm/lib/Target/Mips/MipsFastISel.cpp
llvm/lib/Target/Mips/MipsISelLowering.cpp
llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
llvm/lib/Target/PowerPC/PPCFastISel.cpp
llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
llvm/lib/Target/Sparc/SparcISelLowering.cpp
llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
llvm/lib/Target/VE/VEISelLowering.cpp
llvm/lib/Target/VE/VEInstrInfo.cpp
llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
llvm/lib/Target/X86/X86FrameLowering.cpp
llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
llvm/lib/Target/XCore/XCoreFrameLowering.cpp
llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index ee6e670fe3cd7..109b739528bf4 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -443,7 +443,7 @@ bool AArch64ExpandPseudo::expand_DestructiveOp(
uint64_t FalseLanes = MI.getDesc().TSFlags & AArch64::FalseLanesMask;
bool FalseZero = FalseLanes == AArch64::FalseLanesZero;
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
if (DType == AArch64::DestructiveBinary)
@@ -989,7 +989,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
.addReg(DstReg, RegState::Kill)
.addReg(DstReg, DstFlags | RegState::Implicit);
} else {
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
.add(MI.getOperand(0))
.addUse(DstReg, RegState::Kill);
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 3dc694df509df..c67fa62c7a926 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -355,7 +355,7 @@ unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(SI->second)
@@ -378,7 +378,7 @@ unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
: &AArch64::GPR32RegClass;
unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
ResultReg).addReg(ZeroReg, getKillRegState(true));
return ResultReg;
@@ -410,11 +410,11 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
const TargetRegisterClass *RC = Is64Bit ?
&AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc1), TmpReg)
.addImm(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(TmpReg, getKillRegState(true));
@@ -427,12 +427,12 @@ unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
Align Alignment = DL.getPrefTypeAlign(CFP->getType());
unsigned CPI = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
ADRPReg).addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGE);
unsigned Opc = Is64Bit ? AArch64::LDRDui : AArch64::LDRSui;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(ADRPReg)
.addConstantPoolIndex(CPI, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
@@ -455,7 +455,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
if (!DestEVT.isSimple())
return 0;
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
unsigned ResultReg;
if (OpFlags & AArch64II::MO_GOT) {
@@ -482,7 +482,7 @@ unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {
// LDRWui produces a 32-bit register, but pointers in-register are 64-bits
// so we must extend the result on ILP32.
- unsigned Result64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Result64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG))
.addDef(Result64)
@@ -751,7 +751,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
if (const auto *C = dyn_cast<ConstantInt>(RHS))
if (C->getValue() == 0xffffffff) {
Addr.setExtendType(AArch64_AM::UXTW);
- unsigned Reg = getRegForValue(LHS);
+ Register Reg = getRegForValue(LHS);
if (!Reg)
return false;
Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
@@ -760,7 +760,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
}
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -821,7 +821,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
}
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -847,7 +847,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
Addr.setExtendType(AArch64_AM::LSL);
Addr.setExtendType(AArch64_AM::UXTW);
- unsigned Reg = getRegForValue(LHS);
+ Register Reg = getRegForValue(LHS);
if (!Reg)
return false;
Reg = fastEmitInst_extractsubreg(MVT::i32, Reg, AArch64::sub_32);
@@ -879,7 +879,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
break;
Addr.setShift(0);
- unsigned Reg = getRegForValue(Src);
+ Register Reg = getRegForValue(Src);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -888,7 +888,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
} // end switch
if (Addr.isRegBase() && !Addr.getReg()) {
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (!Reg)
return false;
Addr.setReg(Reg);
@@ -896,7 +896,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
}
if (!Addr.getOffsetReg()) {
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (!Reg)
return false;
Addr.setOffsetReg(Reg);
@@ -1034,7 +1034,7 @@ bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {
// continue. This should almost never happen.
if ((ImmediateOffsetNeedsLowering || Addr.getOffsetReg()) && Addr.isFIBase())
{
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADDXri),
ResultReg)
.addFrameIndex(Addr.getFI())
@@ -1178,7 +1178,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
SI->getOpcode() == Instruction::AShr )
std::swap(LHS, RHS);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -1207,13 +1207,13 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
if (const auto *SI = dyn_cast<BinaryOperator>(RHS))
if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1)))
if ((SI->getOpcode() == Instruction::Shl) && (C->getZExtValue() < 4)) {
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType,
C->getZExtValue(), SetFlags, WantResult);
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
return emitAddSub_rx(UseAdd, RetVT, LHSReg, RHSReg, ExtendType, 0,
@@ -1232,7 +1232,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
- unsigned RHSReg = getRegForValue(MulLHS);
+ Register RHSReg = getRegForValue(MulLHS);
if (!RHSReg)
return 0;
ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, AArch64_AM::LSL,
@@ -1255,7 +1255,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
}
uint64_t ShiftVal = C->getZExtValue();
if (ShiftType != AArch64_AM::InvalidShiftExtend) {
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
ResultReg = emitAddSub_rs(UseAdd, RetVT, LHSReg, RHSReg, ShiftType,
@@ -1267,7 +1267,7 @@ unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
}
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
@@ -1489,7 +1489,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
if (CFP->isZero() && !CFP->isNegative())
UseImm = true;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
@@ -1500,7 +1500,7 @@ bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {
return true;
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
@@ -1577,7 +1577,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (isa<ConstantInt>(SI->getOperand(1)))
std::swap(LHS, RHS);
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -1602,7 +1602,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
assert(isa<ConstantInt>(MulRHS) && "Expected a ConstantInt.");
uint64_t ShiftVal = cast<ConstantInt>(MulRHS)->getValue().logBase2();
- unsigned RHSReg = getRegForValue(MulLHS);
+ Register RHSReg = getRegForValue(MulLHS);
if (!RHSReg)
return 0;
ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
@@ -1616,7 +1616,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (const auto *SI = dyn_cast<ShlOperator>(RHS))
if (const auto *C = dyn_cast<ConstantInt>(SI->getOperand(1))) {
uint64_t ShiftVal = C->getZExtValue();
- unsigned RHSReg = getRegForValue(SI->getOperand(0));
+ Register RHSReg = getRegForValue(SI->getOperand(0));
if (!RHSReg)
return 0;
ResultReg = emitLogicalOp_rs(ISDOpc, RetVT, LHSReg, RHSReg, ShiftVal);
@@ -1625,7 +1625,7 @@ unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
}
}
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return 0;
@@ -1673,7 +1673,7 @@ unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
if (!AArch64_AM::isLogicalImmediate(Imm, RegSize))
return 0;
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_ri(Opc, RC, LHSReg,
AArch64_AM::encodeLogicalImmediate(Imm, RegSize));
if (RetVT >= MVT::i8 && RetVT <= MVT::i16 && ISDOpc != ISD::AND) {
@@ -1715,7 +1715,7 @@ unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
RC = &AArch64::GPR64RegClass;
break;
}
- unsigned ResultReg =
+ Register ResultReg =
fastEmitInst_rri(Opc, RC, LHSReg, RHSReg,
AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftImm));
if (RetVT >= MVT::i8 && RetVT <= MVT::i16) {
@@ -1841,7 +1841,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
}
// Create the base instruction, then add the operands.
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
addLoadStoreOperands(Addr, MIB, MachineMemOperand::MOLoad, ScaleFactor, MMO);
@@ -1856,7 +1856,7 @@ unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
// For zero-extending loads to 64bit we emit a 32bit load and then convert
// the 32bit reg to a 64bit reg.
if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
- unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), Reg64)
.addImm(0)
@@ -1991,7 +1991,7 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
// The integer extend hasn't been emitted yet. FastISel or SelectionDAG
// could select it. Emit a copy to subreg if necessary. FastISel will remove
// it when it selects the integer extend.
- unsigned Reg = lookUpRegForValue(IntExtVal);
+ Register Reg = lookUpRegForValue(IntExtVal);
auto *MI = MRI.getUniqueVRegDef(Reg);
if (!MI) {
if (RetVT == MVT::i64 && VT <= MVT::i32) {
@@ -2174,7 +2174,7 @@ bool AArch64FastISel::selectStore(const Instruction *I) {
// The non-atomic instructions are sufficient for relaxed stores.
if (isReleaseOrStronger(Ord)) {
// The STLR addressing mode only supports a base reg; pass that directly.
- unsigned AddrReg = getRegForValue(PtrV);
+ Register AddrReg = getRegForValue(PtrV);
return emitStoreRelease(VT, SrcReg, AddrReg,
createMachineMemOperandFor(I));
}
@@ -2339,7 +2339,7 @@ bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
unsigned Opc = OpcTable[IsBitTest][IsCmpNE][Is64Bit];
const MCInstrDesc &II = TII.get(Opc);
- unsigned SrcReg = getRegForValue(LHS);
+ Register SrcReg = getRegForValue(LHS);
if (!SrcReg)
return false;
@@ -2454,7 +2454,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
if (foldXALUIntrinsic(CC, I, BI->getCondition())) {
// Fake request the condition, otherwise the intrinsic might be completely
// optimized away.
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (!CondReg)
return false;
@@ -2468,7 +2468,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
}
}
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (CondReg == 0)
return false;
@@ -2480,7 +2480,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
}
const MCInstrDesc &II = TII.get(Opcode);
- unsigned ConstrainedCondReg
+ Register ConstrainedCondReg
= constrainOperandRegClass(II, CondReg, II.getNumDefs());
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(ConstrainedCondReg)
@@ -2493,7 +2493,7 @@ bool AArch64FastISel::selectBranch(const Instruction *I) {
bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
const IndirectBrInst *BI = cast<IndirectBrInst>(I);
- unsigned AddrReg = getRegForValue(BI->getOperand(0));
+ Register AddrReg = getRegForValue(BI->getOperand(0));
if (AddrReg == 0)
return false;
@@ -2563,7 +2563,7 @@ bool AArch64FastISel::selectCmp(const Instruction *I) {
}
if (CondCodes) {
- unsigned TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
+ Register TmpReg1 = createResultReg(&AArch64::GPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::CSINCWr),
TmpReg1)
.addReg(AArch64::WZR, getKillRegState(true))
@@ -2630,18 +2630,18 @@ bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {
if (!Opc)
return false;
- unsigned Src1Reg = getRegForValue(Src1Val);
+ Register Src1Reg = getRegForValue(Src1Val);
if (!Src1Reg)
return false;
- unsigned Src2Reg = getRegForValue(Src2Val);
+ Register Src2Reg = getRegForValue(Src2Val);
if (!Src2Reg)
return false;
if (NeedExtraOp)
Src1Reg = emitLogicalOp_ri(ISD::XOR, MVT::i32, Src1Reg, 1);
- unsigned ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
+ Register ResultReg = fastEmitInst_rr(Opc, &AArch64::GPR32RegClass, Src1Reg,
Src2Reg);
updateValueMap(SI, ResultReg);
return true;
@@ -2690,7 +2690,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
// Try to pick up the flags, so we don't have to emit another compare.
if (foldXALUIntrinsic(CC, I, Cond)) {
// Fake request the condition to force emission of the XALU intrinsic.
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (!CondReg)
return false;
} else if (isa<CmpInst>(Cond) && cast<CmpInst>(Cond)->hasOneUse() &&
@@ -2711,7 +2711,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
}
if (FoldSelect) {
- unsigned SrcReg = getRegForValue(FoldSelect);
+ Register SrcReg = getRegForValue(FoldSelect);
if (!SrcReg)
return false;
@@ -2739,7 +2739,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
}
assert((CC != AArch64CC::AL) && "Unexpected condition code.");
} else {
- unsigned CondReg = getRegForValue(Cond);
+ Register CondReg = getRegForValue(Cond);
if (!CondReg)
return false;
@@ -2753,8 +2753,8 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
.addImm(AArch64_AM::encodeLogicalImmediate(1, 32));
}
- unsigned Src1Reg = getRegForValue(SI->getTrueValue());
- unsigned Src2Reg = getRegForValue(SI->getFalseValue());
+ Register Src1Reg = getRegForValue(SI->getTrueValue());
+ Register Src2Reg = getRegForValue(SI->getFalseValue());
if (!Src1Reg || !Src2Reg)
return false;
@@ -2762,7 +2762,7 @@ bool AArch64FastISel::selectSelect(const Instruction *I) {
if (ExtraCC != AArch64CC::AL)
Src2Reg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, ExtraCC);
- unsigned ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
+ Register ResultReg = fastEmitInst_rri(Opc, RC, Src1Reg, Src2Reg, CC);
updateValueMap(I, ResultReg);
return true;
}
@@ -2772,11 +2772,11 @@ bool AArch64FastISel::selectFPExt(const Instruction *I) {
if (!I->getType()->isDoubleTy() || !V->getType()->isFloatTy())
return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0)
return false;
- unsigned ResultReg = createResultReg(&AArch64::FPR64RegClass);
+ Register ResultReg = createResultReg(&AArch64::FPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTDSr),
ResultReg).addReg(Op);
updateValueMap(I, ResultReg);
@@ -2788,11 +2788,11 @@ bool AArch64FastISel::selectFPTrunc(const Instruction *I) {
if (!I->getType()->isFloatTy() || !V->getType()->isDoubleTy())
return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0)
return false;
- unsigned ResultReg = createResultReg(&AArch64::FPR32RegClass);
+ Register ResultReg = createResultReg(&AArch64::FPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::FCVTSDr),
ResultReg).addReg(Op);
updateValueMap(I, ResultReg);
@@ -2805,7 +2805,7 @@ bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
if (!isTypeLegal(I->getType(), DestVT) || DestVT.isVector())
return false;
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (SrcReg == 0)
return false;
@@ -2825,7 +2825,7 @@ bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {
else
Opc = (DestVT == MVT::i32) ? AArch64::FCVTZUUWSr : AArch64::FCVTZUUXSr;
}
- unsigned ResultReg = createResultReg(
+ Register ResultReg = createResultReg(
DestVT == MVT::i32 ? &AArch64::GPR32RegClass : &AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
@@ -2844,7 +2844,7 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
assert((DestVT == MVT::f32 || DestVT == MVT::f64) &&
"Unexpected value type.");
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (!SrcReg)
return false;
@@ -2871,7 +2871,7 @@ bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {
Opc = (DestVT == MVT::f32) ? AArch64::UCVTFUWSri : AArch64::UCVTFUWDri;
}
- unsigned ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
+ Register ResultReg = fastEmitInst_r(Opc, TLI.getRegClassFor(DestVT), SrcReg);
updateValueMap(I, ResultReg);
return true;
}
@@ -2975,11 +2975,11 @@ bool AArch64FastISel::fastLowerArguments() {
} else
llvm_unreachable("Unexpected value type.");
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
@@ -3009,7 +3009,7 @@ bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
const Value *ArgVal = CLI.OutVals[VA.getValNo()];
MVT ArgVT = OutVTs[VA.getValNo()];
- unsigned ArgReg = getRegForValue(ArgVal);
+ Register ArgReg = getRegForValue(ArgVal);
if (!ArgReg)
return false;
@@ -3104,7 +3104,7 @@ bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
if (CopyVT.isVector() && !Subtarget->isLittleEndian())
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(RVLocs[0].getLocReg());
@@ -3209,14 +3209,14 @@ bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {
else if (Addr.getGlobalValue())
MIB.addGlobalAddress(Addr.getGlobalValue(), 0, 0);
else if (Addr.getReg()) {
- unsigned Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
+ Register Reg = constrainOperandRegClass(II, Addr.getReg(), 0);
MIB.addReg(Reg);
} else
return false;
} else {
unsigned CallReg = 0;
if (Symbol) {
- unsigned ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
+ Register ADRPReg = createResultReg(&AArch64::GPR64commonRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AArch64::ADRP),
ADRPReg)
.addSym(Symbol, AArch64II::MO_GOT | AArch64II::MO_PAGE);
@@ -3438,7 +3438,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// SP = FP + Fixed Object + 16
int FI = MFI.CreateFixedObject(4, 0, false);
- unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64spRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::ADDXri), ResultReg)
.addFrameIndex(FI)
@@ -3568,10 +3568,10 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
Opc = AArch64::FABSDr;
break;
}
- unsigned SrcReg = getRegForValue(II->getOperand(0));
+ Register SrcReg = getRegForValue(II->getOperand(0));
if (!SrcReg)
return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg);
updateValueMap(II, ResultReg);
@@ -3593,7 +3593,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeLegal(RetTy, VT))
return false;
- unsigned Op0Reg = getRegForValue(II->getOperand(0));
+ Register Op0Reg = getRegForValue(II->getOperand(0));
if (!Op0Reg)
return false;
@@ -3671,17 +3671,17 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
break;
case Intrinsic::smul_with_overflow: {
CC = AArch64CC::NE;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
if (VT == MVT::i32) {
MulReg = emitSMULL_rr(MVT::i64, LHSReg, RHSReg);
- unsigned MulSubReg =
+ Register MulSubReg =
fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32);
// cmp xreg, wreg, sxtw
emitAddSub_rx(/*UseAdd=*/false, MVT::i64, MulReg, MulSubReg,
@@ -3701,11 +3701,11 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
}
case Intrinsic::umul_with_overflow: {
CC = AArch64CC::NE;
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return false;
- unsigned RHSReg = getRegForValue(RHS);
+ Register RHSReg = getRegForValue(RHS);
if (!RHSReg)
return false;
@@ -3799,7 +3799,7 @@ bool AArch64FastISel::selectRet(const Instruction *I) {
if (!VA.isRegLoc())
return false;
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -3879,7 +3879,7 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) {
DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg)
return false;
@@ -3906,7 +3906,7 @@ bool AArch64FastISel::selectTrunc(const Instruction *I) {
break;
}
// Issue an extract_subreg to get the lower 32-bits.
- unsigned Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
+ Register Reg32 = fastEmitInst_extractsubreg(MVT::i32, SrcReg,
AArch64::sub_32);
// Create the AND instruction which performs the actual truncation.
ResultReg = emitAnd_ri(MVT::i32, Reg32, Mask);
@@ -4007,7 +4007,7 @@ unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg,
if (NeedTrunc)
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4033,7 +4033,7 @@ unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4110,7 +4110,7 @@ unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg,
Op0Reg = emitAnd_ri(MVT::i32, Op0Reg, Mask);
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
}
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4136,7 +4136,7 @@ unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4226,7 +4226,7 @@ unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg,
Op0Reg = emitIntExt(RetVT, Op0Reg, MVT::i32, /*isZExt=*/false);
Op1Reg = emitAnd_ri(MVT::i32, Op1Reg, Mask);
}
- unsigned ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
+ Register ResultReg = fastEmitInst_rr(Opc, RC, Op0Reg, Op1Reg);
if (NeedTrunc)
ResultReg = emitAnd_ri(MVT::i32, ResultReg, Mask);
return ResultReg;
@@ -4252,7 +4252,7 @@ unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
// Just emit a copy for "zero" shifts.
if (Shift == 0) {
if (RetVT == SrcVT) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0);
@@ -4428,7 +4428,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
return false;
// Check if the load instruction has already been selected.
- unsigned Reg = lookUpRegForValue(LI);
+ Register Reg = lookUpRegForValue(LI);
if (!Reg)
return false;
@@ -4456,7 +4456,7 @@ bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
}
if (IsZExt) {
- unsigned Reg64 = createResultReg(&AArch64::GPR64RegClass);
+ Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), Reg64)
.addImm(0)
@@ -4490,7 +4490,7 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
if (optimizeIntExtLoad(I, RetVT, SrcVT))
return true;
- unsigned SrcReg = getRegForValue(I->getOperand(0));
+ Register SrcReg = getRegForValue(I->getOperand(0));
if (!SrcReg)
return false;
@@ -4499,7 +4499,7 @@ bool AArch64FastISel::selectIntExt(const Instruction *I) {
if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0))) {
if ((IsZExt && Arg->hasZExtAttr()) || (!IsZExt && Arg->hasSExtAttr())) {
if (RetVT == MVT::i64 && SrcVT != MVT::i64) {
- unsigned ResultReg = createResultReg(&AArch64::GPR64RegClass);
+ Register ResultReg = createResultReg(&AArch64::GPR64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AArch64::SUBREG_TO_REG), ResultReg)
.addImm(0)
@@ -4543,21 +4543,21 @@ bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {
break;
}
unsigned MSubOpc = Is64bit ? AArch64::MSUBXrrr : AArch64::MSUBWrrr;
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src1Reg)
return false;
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
- unsigned QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
+ Register QuotReg = fastEmitInst_rr(DivOpc, RC, Src0Reg, Src1Reg);
assert(QuotReg && "Unexpected DIV instruction emission failure.");
// The remainder is computed as numerator - (quotient * denominator) using the
// MSUB instruction.
- unsigned ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
+ Register ResultReg = fastEmitInst_rrr(MSubOpc, RC, QuotReg, Src1Reg, Src0Reg);
updateValueMap(I, ResultReg);
return true;
}
@@ -4602,7 +4602,7 @@ bool AArch64FastISel::selectMul(const Instruction *I) {
}
}
- unsigned Src0Reg = getRegForValue(Src0);
+ Register Src0Reg = getRegForValue(Src0);
if (!Src0Reg)
return false;
@@ -4615,11 +4615,11 @@ bool AArch64FastISel::selectMul(const Instruction *I) {
}
}
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src1Reg)
return false;
@@ -4666,7 +4666,7 @@ bool AArch64FastISel::selectShift(const Instruction *I) {
}
}
- unsigned Op0Reg = getRegForValue(Op0);
+ Register Op0Reg = getRegForValue(Op0);
if (!Op0Reg)
return false;
@@ -4689,11 +4689,11 @@ bool AArch64FastISel::selectShift(const Instruction *I) {
return true;
}
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (!Op0Reg)
return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (!Op1Reg)
return false;
@@ -4746,11 +4746,11 @@ bool AArch64FastISel::selectBitCast(const Instruction *I) {
case MVT::f32: RC = &AArch64::FPR32RegClass; break;
case MVT::f64: RC = &AArch64::FPR64RegClass; break;
}
- unsigned Op0Reg = getRegForValue(I->getOperand(0));
+ Register Op0Reg = getRegForValue(I->getOperand(0));
if (!Op0Reg)
return false;
- unsigned ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
+ Register ResultReg = fastEmitInst_r(Opc, RC, Op0Reg);
if (!ResultReg)
return false;
@@ -4810,7 +4810,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
return selectBinaryOp(I, ISD::SDIV);
unsigned Lg2 = C.countTrailingZeros();
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
if (!Src0Reg)
return false;
@@ -4840,7 +4840,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
SelectOpc = AArch64::CSELWr;
RC = &AArch64::GPR32RegClass;
}
- unsigned SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
+ Register SelectReg = fastEmitInst_rri(SelectOpc, RC, AddReg, Src0Reg,
AArch64CC::LT);
if (!SelectReg)
return false;
@@ -4866,7 +4866,7 @@ bool AArch64FastISel::selectSDiv(const Instruction *I) {
/// have to duplicate it for AArch64, because otherwise we would fail during the
/// sign-extend emission.
unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) {
- unsigned IdxN = getRegForValue(Idx);
+ Register IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return 0;
@@ -4889,7 +4889,7 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
if (Subtarget->isTargetILP32())
return false;
- unsigned N = getRegForValue(I->getOperand(0));
+ Register N = getRegForValue(I->getOperand(0));
if (!N)
return false;
@@ -4983,16 +4983,16 @@ bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {
const MCInstrDesc &II = TII.get(Opc);
- const unsigned AddrReg = constrainOperandRegClass(
+ const Register AddrReg = constrainOperandRegClass(
II, getRegForValue(I->getPointerOperand()), II.getNumDefs());
- const unsigned DesiredReg = constrainOperandRegClass(
+ const Register DesiredReg = constrainOperandRegClass(
II, getRegForValue(I->getCompareOperand()), II.getNumDefs() + 1);
- const unsigned NewReg = constrainOperandRegClass(
+ const Register NewReg = constrainOperandRegClass(
II, getRegForValue(I->getNewValOperand()), II.getNumDefs() + 2);
- const unsigned ResultReg1 = createResultReg(ResRC);
- const unsigned ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
- const unsigned ScratchReg = createResultReg(&AArch64::GPR32RegClass);
+ const Register ResultReg1 = createResultReg(ResRC);
+ const Register ResultReg2 = createResultReg(&AArch64::GPR32RegClass);
+ const Register ScratchReg = createResultReg(&AArch64::GPR32RegClass);
// FIXME: MachineMemOperand doesn't support cmpxchg yet.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 1af131797e636..a4d20735e2b15 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -547,7 +547,7 @@ void AArch64FrameLowering::emitCalleeSavedFrameMoves(
return;
for (const auto &Info : CSI) {
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
// Not all unwinders may know about SVE registers, so assume the lowest
// common denominator.
@@ -2324,7 +2324,7 @@ static void computeCalleeSaveRegisterPairs(
// Add the next reg to the pair if it is in the same register class.
if (unsigned(i + RegInc) < Count) {
- unsigned NextReg = CSI[i + RegInc].getReg();
+ Register NextReg = CSI[i + RegInc].getReg();
bool IsFirst = i == FirstReg;
switch (RPI.Type) {
case RegPairInfo::GPR:
@@ -3128,7 +3128,7 @@ void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
DebugLoc DL;
RS->enterBasicBlockEnd(MBB);
RS->backward(std::prev(MBBI));
- unsigned DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
+ Register DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
assert(DstReg && "There must be a free register after frame setup");
BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4158201ca6ce7..e2ef4ff786376 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5381,7 +5381,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
// If this is an 8, 16 or 32-bit value, it is really passed promoted
@@ -5543,7 +5543,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// Conservatively forward X8, since it might be used for aggregate return.
if (!CCInfo.isAllocated(AArch64::X8)) {
- unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
+ Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
}
}
@@ -5627,7 +5627,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
+ Register VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), DL, Val, FIN,
@@ -5657,7 +5657,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);
for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
- unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
+ Register VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
@@ -8258,7 +8258,7 @@ SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
} else {
// Return LR, which contains the return address. Mark it an implicit
// live-in.
- unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
+ Register Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
ReturnAddress = DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 3a836ac33064e..6aefc1fdb599a 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1139,7 +1139,7 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
? getLdStOffsetOp(*StoreI).getImm()
: getLdStOffsetOp(*StoreI).getImm() * StoreSize;
int Width = LoadSize * 8;
- unsigned DestReg =
+ Register DestReg =
IsStoreXReg ? Register(TRI->getMatchingSuperReg(
LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
: LdRt;
diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
index 7307961ddb5fd..87be7bb6d113e 100644
--- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -304,7 +304,7 @@ bool AArch64SpeculationHardening::instrumentControlFlow(
// sure if that would actually result in a big performance difference
// though. Maybe RegisterScavenger::findSurvivorBackwards has some logic
// already to do this - but it's unclear if that could easily be used here.
- unsigned TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
+ Register TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
LLVM_DEBUG(dbgs() << "RS finds "
<< ((TmpReg == 0) ? "no register " : "register ");
if (TmpReg != 0) dbgs() << printReg(TmpReg, TRI) << " ";
diff --git a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index d2488f61eb4be..cae6d65bed2df 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -195,7 +195,7 @@ void AArch64StackTaggingPreRA::uncheckUsesOf(unsigned TaggedReg, int FI) {
void AArch64StackTaggingPreRA::uncheckLoadsAndStores() {
for (auto *I : ReTags) {
- unsigned TaggedReg = I->getOperand(0).getReg();
+ Register TaggedReg = I->getOperand(0).getReg();
int FI = I->getOperand(1).getIndex();
uncheckUsesOf(TaggedReg, FI);
}
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 0f8dd0b3bf589..d0ad13bb75064 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -1438,7 +1438,7 @@ int GCNHazardRecognizer::checkMAIHazards90A(MachineInstr *MI) {
if (!Use.isReg())
continue;
- unsigned Reg = Use.getReg();
+ Register Reg = Use.getReg();
bool FullReg;
const MachineInstr *MI1;
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index 82c09378acac8..fb106d98c162e 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -27,7 +27,7 @@ void llvm::printLivesAt(SlotIndex SI,
<< *LIS.getInstructionFromIndex(SI);
unsigned Num = 0;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- const unsigned Reg = Register::index2VirtReg(I);
+ const Register Reg = Register::index2VirtReg(I);
if (!LIS.hasInterval(Reg))
continue;
const auto &LI = LIS.getInterval(Reg);
@@ -487,7 +487,7 @@ void GCNRPTracker::printLiveRegs(raw_ostream &OS, const LiveRegSet& LiveRegs,
const MachineRegisterInfo &MRI) {
const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto It = LiveRegs.find(Reg);
if (It != LiveRegs.end() && It->second.any())
OS << ' ' << printVRegOrUnit(Reg, TRI) << ':'
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index f8b4d3e3319ea..0fbdbef6fcce6 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -127,7 +127,7 @@ static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
// FIXME: Just emit the readlane/writelane directly
if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
for (const CalleeSavedInfo &CI : reverse(CSI)) {
- unsigned Reg = CI.getReg();
+ Register Reg = CI.getReg();
const TargetRegisterClass *RC =
TRI->getMinimalPhysRegClass(Reg, MVT::i32);
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 663eceae6ee76..2f083561bbd40 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -1144,7 +1144,7 @@ static bool determineFPRegsToClear(const MachineInstr &MI,
if (!Op.isReg())
continue;
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
if (Op.isDef()) {
if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
(Reg >= ARM::D0 && Reg <= ARM::D15) ||
@@ -1356,7 +1356,7 @@ void ARMExpandPseudo::CMSESaveClearFPRegsV8(
std::vector<unsigned> NonclearedFPRegs;
for (const MachineOperand &Op : MBBI->operands()) {
if (Op.isReg() && Op.isUse()) {
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
assert(!ARM::DPRRegClass.contains(Reg) ||
ARM::DPR_VFP2RegClass.contains(Reg));
assert(!ARM::QPRRegClass.contains(Reg));
@@ -1543,7 +1543,7 @@ void ARMExpandPseudo::CMSERestoreFPRegsV8(
std::vector<unsigned> NonclearedFPRegs;
for (const MachineOperand &Op : MBBI->operands()) {
if (Op.isReg() && Op.isDef()) {
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
assert(!ARM::DPRRegClass.contains(Reg) ||
ARM::DPR_VFP2RegClass.contains(Reg));
assert(!ARM::QPRRegClass.contains(Reg));
@@ -1663,7 +1663,7 @@ static bool definesOrUsesFPReg(const MachineInstr &MI) {
for (const MachineOperand &Op : MI.operands()) {
if (!Op.isReg())
continue;
- unsigned Reg = Op.getReg();
+ Register Reg = Op.getReg();
if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
(Reg >= ARM::D0 && Reg <= ARM::D15) ||
(Reg >= ARM::S0 && Reg <= ARM::S31))
@@ -2201,7 +2201,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
}
case ARM::tBLXNS_CALL: {
DebugLoc DL = MBBI->getDebugLoc();
- unsigned JumpReg = MBBI->getOperand(0).getReg();
+ Register JumpReg = MBBI->getOperand(0).getReg();
// Figure out which registers are live at the point immediately before the
// call. When we indiscriminately push a set of registers, the live
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 9224c2221f4d7..5d94b99d4c5d9 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -319,7 +319,7 @@ unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, unsigned Op1) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
// Make sure the input operands are sufficiently constrained to be legal
@@ -346,7 +346,7 @@ unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
// Make sure the input operand is sufficiently constrained to be legal
@@ -371,7 +371,7 @@ unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
uint64_t Imm) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) {
@@ -392,7 +392,7 @@ unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::f64) return 0;
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(SrcReg));
@@ -402,7 +402,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::i64) return 0;
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
@@ -428,7 +428,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
Imm = ARM_AM::getFP32Imm(Val);
Opc = ARM::FCONSTS;
}
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), DestReg).addImm(Imm));
return DestReg;
@@ -440,7 +440,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
// MachineConstantPool wants an explicit alignment.
Align Alignment = DL.getPrefTypeAlign(CFP->getType());
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
// The extra reg is for addrmode5.
@@ -462,7 +462,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(CI->getZExtValue()));
@@ -478,7 +478,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(Imm));
@@ -531,7 +531,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
: &ARM::GPRRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
// FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
@@ -589,7 +589,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
if (IsPositionIndependent) {
unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DbgLoc, TII.get(Opc), NewDestReg)
@@ -605,7 +605,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
(Subtarget->isTargetMachO() && IsIndirect) ||
Subtarget->genLongCalls()) {
MachineInstrBuilder MIB;
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
if (isThumb2)
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
@@ -657,7 +657,7 @@ unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
if (SI != FuncInfo.StaticAllocaMap.end()) {
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -832,7 +832,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
: &ARM::GPRRegClass;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
@@ -991,7 +991,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
// If we had an unaligned load of a float we've converted it to a regular
// load. Now we must move from the GPR to the FP register.
if (needVMOV) {
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(ResultReg));
@@ -1044,7 +1044,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
- unsigned Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
+ Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
: &ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
@@ -1095,7 +1095,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
if (!Subtarget->hasVFP2Base()) return false;
// Unaligned stores need special handling. Floats require word-alignment.
if (Alignment && Alignment < 4) {
- unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
+ Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
@@ -1257,7 +1257,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
(isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
- unsigned OpReg = getRegForValue(TI->getOperand(0));
+ Register OpReg = getRegForValue(TI->getOperand(0));
OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
@@ -1284,7 +1284,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
return true;
}
- unsigned CmpReg = getRegForValue(BI->getCondition());
+ Register CmpReg = getRegForValue(BI->getCondition());
if (CmpReg == 0) return false;
// We've been divorced from our compare! Our block was split, and
@@ -1315,7 +1315,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
- unsigned AddrReg = getRegForValue(I->getOperand(0));
+ Register AddrReg = getRegForValue(I->getOperand(0));
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
@@ -1406,7 +1406,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
break;
}
- unsigned SrcReg1 = getRegForValue(Src1Value);
+ Register SrcReg1 = getRegForValue(Src1Value);
if (SrcReg1 == 0) return false;
unsigned SrcReg2 = 0;
@@ -1468,7 +1468,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
: &ARM::GPRRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = fastMaterializeConstant(Zero);
// ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
@@ -1488,10 +1488,10 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
if (!I->getType()->isDoubleTy() ||
!V->getType()->isFloatTy()) return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(&ARM::DPRRegClass);
+ Register Result = createResultReg(&ARM::DPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTDS), Result)
.addReg(Op));
@@ -1507,10 +1507,10 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
if (!(I->getType()->isFloatTy() &&
V->getType()->isDoubleTy())) return false;
- unsigned Op = getRegForValue(V);
+ Register Op = getRegForValue(V);
if (Op == 0) return false;
- unsigned Result = createResultReg(&ARM::SPRRegClass);
+ Register Result = createResultReg(&ARM::SPRRegClass);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTSD), Result)
.addReg(Op));
@@ -1535,7 +1535,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0) return false;
// Handle sign-extension.
@@ -1556,7 +1556,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
else return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg).addReg(FP));
updateValueMap(I, ResultReg);
@@ -1572,7 +1572,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
if (!isTypeLegal(RetTy, DstVT))
return false;
- unsigned Op = getRegForValue(I->getOperand(0));
+ Register Op = getRegForValue(I->getOperand(0));
if (Op == 0) return false;
unsigned Opc;
@@ -1583,7 +1583,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
else return false;
// f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg).addReg(Op));
@@ -1604,9 +1604,9 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
// Things need to be register sized for register moves.
if (VT != MVT::i32) return false;
- unsigned CondReg = getRegForValue(I->getOperand(0));
+ Register CondReg = getRegForValue(I->getOperand(0));
if (CondReg == 0) return false;
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
// Check to see if we can use an immediate in the conditional move.
@@ -1649,7 +1649,7 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
else
MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (!UseImm) {
Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
@@ -1752,15 +1752,15 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ Register SrcReg1 = getRegForValue(I->getOperand(0));
if (SrcReg1 == 0) return false;
// TODO: Often the 2nd operand is an immediate, which can be encoded directly
// in the instruction, rather than materializing the value in a register.
- unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ Register SrcReg2 = getRegForValue(I->getOperand(1));
if (SrcReg2 == 0) return false;
- unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
+ Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1803,13 +1803,13 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
Opc = is64bit ? ARM::VMULD : ARM::VMULS;
break;
}
- unsigned Op1 = getRegForValue(I->getOperand(0));
+ Register Op1 = getRegForValue(I->getOperand(0));
if (Op1 == 0) return false;
- unsigned Op2 = getRegForValue(I->getOperand(1));
+ Register Op2 = getRegForValue(I->getOperand(1));
if (Op2 == 0) return false;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Op1).addReg(Op2));
@@ -2101,7 +2101,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
F.isVarArg()));
const Value *RV = Ret->getOperand(0);
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -2226,7 +2226,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
ArgVTs.reserve(I->getNumOperands());
ArgFlags.reserve(I->getNumOperands());
for (Value *Op : I->operands()) {
- unsigned Arg = getRegForValue(Op);
+ Register Arg = getRegForValue(Op);
if (Arg == 0) return false;
Type *ArgTy = Op->getType();
@@ -2588,7 +2588,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg) return false;
// Because the high bits are undefined, a truncate doesn't generate
@@ -2744,7 +2744,7 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool isZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg) return false;
EVT SrcEVT, DestEVT;
@@ -2788,7 +2788,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
}
Value *Src1Value = I->getOperand(0);
- unsigned Reg1 = getRegForValue(Src1Value);
+ Register Reg1 = getRegForValue(Src1Value);
if (Reg1 == 0) return false;
unsigned Reg2 = 0;
@@ -2797,7 +2797,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
if (Reg2 == 0) return false;
}
- unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
+ Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
if(ResultReg == 0) return false;
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -2975,7 +2975,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
MIB.add(predOps(ARMCC::AL));
// Fix the address by adding pc.
- unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register DestReg = createResultReg(TLI.getRegClassFor(VT));
Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
: ARM::PICADD;
DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
@@ -2987,7 +2987,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
MIB.add(predOps(ARMCC::AL));
if (UseGOT_PREL && Subtarget->isThumb()) {
- unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
+ Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
.addReg(DestReg)
@@ -3057,11 +3057,11 @@ bool ARMFastISel::fastLowerArguments() {
for (const Argument &Arg : F->args()) {
unsigned ArgNo = Arg.getArgNo();
unsigned SrcReg = GPRArgRegs[ArgNo];
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
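The mechanical swap above compiles unchanged because Register is a thin value wrapper around unsigned. A minimal sketch of the relevant surface, simplified from llvm/include/llvm/CodeGen/Register.h around this commit and not the complete class:

    class Register {
      unsigned Reg;

    public:
      constexpr Register(unsigned Val = 0) : Reg(Val) {}

      // The implicit conversion back to unsigned keeps the idioms in the
      // hunks above working verbatim: 'if (Reg == 0)', '.addReg(Reg)',
      // switches over physical-register enums. That is what makes the
      // replacement NFC.
      constexpr operator unsigned() const { return Reg; }

      // 0 is the sentinel for "no register" (simplified; the real check
      // compares against MCRegister::NoRegister).
      bool isValid() const { return Reg != 0; }
    };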
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 1bf2bb6d1ecad..1f2f6f7497e02 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -516,7 +516,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// Determine spill area sizes.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -751,7 +751,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock::iterator Pos = std::next(GPRCS1Push);
int CFIIndex;
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -784,7 +784,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
if (GPRCS2Size > 0) {
MachineBasicBlock::iterator Pos = std::next(GPRCS2Push);
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -794,7 +794,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
case ARM::R12:
if (STI.splitFramePushPop(MF)) {
unsigned DwarfReg = MRI->getDwarfRegNum(
- Reg == ARM::R12 ? (unsigned)ARM::RA_AUTH_CODE : Reg, true);
+ Reg == ARM::R12 ? ARM::RA_AUTH_CODE : Reg, true);
unsigned Offset = MFI.getObjectOffset(FI);
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
@@ -812,7 +812,7 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
// instructions in the prologue.
MachineBasicBlock::iterator Pos = std::next(LastPush);
for (const auto &Entry : CSI) {
- unsigned Reg = Entry.getReg();
+ Register Reg = Entry.getReg();
int FI = Entry.getFrameIdx();
if ((Reg >= ARM::D0 && Reg <= ARM::D31) &&
(Reg < ARM::D8 || Reg >= ARM::D8 + AFI->getNumAlignedDPRCS2Regs())) {
@@ -1144,7 +1144,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
while (i != 0) {
unsigned LastReg = 0;
for (; i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
+ Register Reg = CSI[i-1].getReg();
if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;
// D-registers in the aligned area DPRCS2 are NOT spilled here.
@@ -1237,7 +1237,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
bool DeleteRet = false;
for (; i != 0; --i) {
CalleeSavedInfo &Info = CSI[i-1];
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
if (!(Func)(Reg, STI.splitFramePushPop(MF))) continue;
// The aligned reloads from area DPRCS2 are not inserted here.
@@ -2353,7 +2353,7 @@ bool ARMFrameLowering::assignCalleeSavedSpillSlots(
// LR, R7, R6, R5, R4, <R12>, R11, R10, R9, R8, D15-D8
CSI.insert(find_if(CSI,
[=](const auto &CS) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
return Reg == ARM::R10 || Reg == ARM::R11 ||
Reg == ARM::R8 || Reg == ARM::R9 ||
ARM::DPRRegClass.contains(Reg);
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 470f3f05bd960..98c8133282a26 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -5800,8 +5800,8 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){
assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
SDValue V0 = N->getOperand(i+1);
SDValue V1 = N->getOperand(i+2);
- unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
- unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
+ Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
+ Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
SDValue PairedReg;
MachineRegisterInfo &MRI = MF->getRegInfo();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d1ae67dae1e23..b525c1dc60389 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2899,7 +2899,7 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
unsigned Bytes = Arg.getValueSizeInBits() / 8;
int FI = std::numeric_limits<int>::max();
if (Arg.getOpcode() == ISD::CopyFromReg) {
- unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+ Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!Register::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
@@ -4018,7 +4018,7 @@ SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
assert(Mask && "Missing call preserved mask for calling convention");
// Mark LR an implicit live-in.
- unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
SDValue ReturnAddress =
DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
@@ -4272,7 +4272,7 @@ SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
RC = &ARM::GPRRegClass;
// Transform the arguments stored in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
SDValue ArgValue2;
@@ -4342,7 +4342,7 @@ int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
- unsigned VReg = MF.addLiveIn(Reg, RC);
+ Register VReg = MF.addLiveIn(Reg, RC);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo(OrigArg, 4 * i));
@@ -4527,7 +4527,7 @@ SDValue ARMTargetLowering::LowerFormalArguments(
llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
// Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this value is passed in r0 and has the returned attribute (e.g.
@@ -6065,7 +6065,7 @@ SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
}
// Return LR, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
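Many of the lowering hunks above follow the same addLiveIn pattern: mark a physical argument or link register live-in to the MachineFunction and get back a fresh virtual register of the given class, which is why Register is the natural result type. The call's shape, paraphrased from llvm/include/llvm/CodeGen/MachineFunction.h and offered as an approximation rather than the exact declaration:

    // Marks PReg live-in to the function and returns the virtual register
    // (created on first use) that carries its value on entry, e.g.
    //   Register Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
    Register MachineFunction::addLiveIn(MCRegister PReg,
                                        const TargetRegisterClass *RC);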
diff --git a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
index 54e80a095dd45..71a82a1e32715 100644
--- a/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
+++ b/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp
@@ -167,7 +167,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
DebugLoc dl;
Register FramePtr = RegInfo->getFrameRegister(MF);
- unsigned BasePtr = RegInfo->getBaseRegister();
+ Register BasePtr = RegInfo->getBaseRegister();
int CFAOffset = 0;
// Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
@@ -206,7 +206,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -267,7 +267,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
.setMIFlags(MachineInstr::FrameSetup);
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -348,7 +348,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
// Emit call frame information for the callee-saved high registers.
for (auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
int FI = I.getFrameIdx();
switch (Reg) {
case ARM::R8:
@@ -376,7 +376,7 @@ void Thumb1FrameLowering::emitPrologue(MachineFunction &MF,
// at this point in the prologue, so pick one.
unsigned ScratchRegister = ARM::NoRegister;
for (auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (isARMLowRegister(Reg) && !(HasFP && Reg == FramePtr)) {
ScratchRegister = Reg;
break;
@@ -531,7 +531,7 @@ void Thumb1FrameLowering::emitEpilogue(MachineFunction &MF,
unsigned ScratchRegister = ARM::NoRegister;
bool HasFP = hasFP(MF);
for (auto &I : MFI.getCalleeSavedInfo()) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (isARMLowRegister(Reg) && !(HasFP && Reg == FramePtr)) {
ScratchRegister = Reg;
break;
@@ -825,7 +825,7 @@ bool Thumb1FrameLowering::spillCalleeSavedRegisters(
// LoRegs for saving HiRegs.
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR) {
LoRegsToSave[Reg] = true;
@@ -949,7 +949,7 @@ bool Thumb1FrameLowering::restoreCalleeSavedRegisters(
ARMRegSet CopyRegs;
for (CalleeSavedInfo I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR) {
LoRegsToRestore[Reg] = true;
@@ -1022,7 +1022,7 @@ bool Thumb1FrameLowering::restoreCalleeSavedRegisters(
bool NeedsPop = false;
for (CalleeSavedInfo &Info : llvm::reverse(CSI)) {
- unsigned Reg = Info.getReg();
+ Register Reg = Info.getReg();
// High registers (excluding lr) have already been dealt with
if (!(ARM::tGPRRegClass.contains(Reg) || Reg == ARM::LR))
diff --git a/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
index 543d948750379..74c45d32136da 100644
--- a/llvm/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
@@ -248,7 +248,7 @@ bool AVRFrameLowering::spillCalleeSavedRegisters(
AVRMachineFunctionInfo *AVRFI = MF.getInfo<AVRMachineFunctionInfo>();
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsNotLiveIn = !MBB.isLiveIn(Reg);
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
@@ -286,7 +286,7 @@ bool AVRFrameLowering::restoreCalleeSavedRegisters(
const TargetInstrInfo &TII = *STI.getInstrInfo();
for (const CalleeSavedInfo &CCSI : CSI) {
- unsigned Reg = CCSI.getReg();
+ Register Reg = CCSI.getReg();
assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index f7f560ff5db32..894f4829fe3f1 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1192,7 +1192,7 @@ SDValue AVRTargetLowering::LowerFormalArguments(
llvm_unreachable("Unknown argument type!");
}
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// :NOTE: Clang should not promote any i8 into i16 but for safety the
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index a9520dcde88e2..989a98571434c 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1410,7 +1410,7 @@ bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// Add live in registers. We treat eh_return callee saved register r0 - r3
// specially. They are not really callee saved registers as they are not
// supposed to be killed.
@@ -1479,7 +1479,7 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
}
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
int FI = I.getFrameIdx();
HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
@@ -1620,7 +1620,7 @@ bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
// sub-registers to SRegs.
LLVM_DEBUG(dbgs() << "Initial CS registers: {");
for (const CalleeSavedInfo &I : CSI) {
- unsigned R = I.getReg();
+ Register R = I.getReg();
LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
SRegs[*SR] = true;
@@ -2635,7 +2635,7 @@ bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
// a contiguous block starting from D8.
BitVector Regs(Hexagon::NUM_TARGET_REGS);
for (const CalleeSavedInfo &I : CSI) {
- unsigned R = I.getReg();
+ Register R = I.getReg();
if (!Hexagon::DoubleRegsRegClass.contains(R))
return true;
Regs[R] = true;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 0225cc5f506ae..161768b8dc226 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -1285,7 +1285,7 @@ void HexagonDAGToDAGISel::emitFunctionEntryCode() {
MachineFrameInfo &MFI = MF->getFrameInfo();
MachineBasicBlock *EntryBB = &MF->front();
- unsigned AR = FuncInfo->CreateReg(MVT::i32);
+ Register AR = FuncInfo->CreateReg(MVT::i32);
Align EntryMaxA = MFI.getMaxAlign();
BuildMI(EntryBB, DebugLoc(), HII->get(Hexagon::PS_aligna), AR)
.addImm(EntryMaxA.value());
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 9aac770a43800..d7ca934a23e64 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -686,7 +686,7 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
case InlineAsm::Kind_RegDef:
case InlineAsm::Kind_RegDefEarlyClobber: {
for (; NumVals; --NumVals, ++i) {
- unsigned Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
+ Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
if (Reg != LR)
continue;
HMFI.setHasClobberLR(true);
@@ -1186,7 +1186,7 @@ HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
}
// Return LR, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(HRI.getRARegister(), getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
index 9dedca4b11909..ada78ca70559e 100644
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -224,14 +224,14 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NumRegs = MRI->getNumVirtRegs();
BitVector DoubleRegs(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
- unsigned R = Register::index2VirtReg(i);
+ Register R = Register::index2VirtReg(i);
if (MRI->getRegClass(R) == DoubleRC)
DoubleRegs.set(i);
}
BitVector FixedRegs(NumRegs);
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
MachineInstr *DefI = MRI->getVRegDef(R);
// In some cases a register may exist, but never be defined or used.
// It should never appear anywhere, but mark it as "fixed", just to be
@@ -244,7 +244,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
if (FixedRegs[x])
continue;
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
LLVM_DEBUG(dbgs() << printReg(R, TRI) << " ~~");
USet &Asc = AssocMap[R];
for (auto U = MRI->use_nodbg_begin(R), Z = MRI->use_nodbg_end();
@@ -281,7 +281,7 @@ void HexagonSplitDoubleRegs::partitionRegisters(UUSetMap &P2Rs) {
unsigned NextP = 1;
USet Visited;
for (int x = DoubleRegs.find_first(); x >= 0; x = DoubleRegs.find_next(x)) {
- unsigned R = Register::index2VirtReg(x);
+ Register R = Register::index2VirtReg(x);
if (Visited.count(R))
continue;
// Create a new partition for R.
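The partitioning loops above walk every virtual register by its 0-based index. The round trip works because a virtual Register is just that index with a tag bit set, roughly as below (simplified from Register.h; the 1u << 31 flag is this era's encoding and is stated here as an assumption):

    static constexpr unsigned VirtualRegFlag = 1u << 31; // assumed encoding

    // 0-based vreg index -> Register, and back.
    static Register index2VirtReg(unsigned Index) {
      return Register(Index | VirtualRegFlag);
    }
    static unsigned virtReg2Index(Register Reg) {
      return Reg & ~VirtualRegFlag;
    }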
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
index 3e483e75ac6f2..010ff80ad42a5 100644
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -511,7 +511,7 @@ SDValue LanaiTargetLowering::LowerCCCArguments(
// the sret argument into rv for the return. Save the argument into
// a virtual register so that we can access it from the return points.
if (MF.getFunction().hasStructRetAttr()) {
- unsigned Reg = LanaiMFI->getSRetReturnReg();
+ Register Reg = LanaiMFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
LanaiMFI->setSRetReturnReg(Reg);
@@ -577,7 +577,7 @@ LanaiTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (DAG.getMachineFunction().getFunction().hasStructRetAttr()) {
MachineFunction &MF = DAG.getMachineFunction();
LanaiMachineFunctionInfo *LanaiMFI = MF.getInfo<LanaiMachineFunctionInfo>();
- unsigned Reg = LanaiMFI->getSRetReturnReg();
+ Register Reg = LanaiMFI->getSRetReturnReg();
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments().");
SDValue Val =
@@ -1077,7 +1077,7 @@ SDValue LanaiTargetLowering::LowerRETURNADDR(SDValue Op,
// Return the link register, which contains the return address.
// Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
+ Register Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}
diff --git a/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp b/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
index abe20c8e18cf4..03cf102051730 100644
--- a/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiRegisterInfo.cpp
@@ -165,7 +165,7 @@ void LanaiRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if ((isSPLSOpcode(MI.getOpcode()) && !isInt<10>(Offset)) ||
!isInt<16>(Offset)) {
assert(RS && "Register scavenging must be on");
- unsigned Reg = RS->FindUnusedReg(&Lanai::GPRRegClass);
+ Register Reg = RS->FindUnusedReg(&Lanai::GPRRegClass);
if (!Reg)
Reg = RS->scavengeRegister(&Lanai::GPRRegClass, II, SPAdj);
assert(Reg && "Register scavenger failed");
diff --git a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
index 4ef9a567d4539..6a8dc3502496f 100644
--- a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -190,7 +190,7 @@ bool MSP430FrameLowering::spillCalleeSavedRegisters(
MFI->setCalleeSavedFrameSize(CSI.size() * 2);
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
BuildMI(MBB, MI, DL, TII.get(MSP430::PUSH16r))
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 7116d22b28894..aebfc6b0ae2ea 100644
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -705,7 +705,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
if (Ins[i].Flags.isSRet()) {
- unsigned Reg = FuncInfo->getSRetReturnReg();
+ Register Reg = FuncInfo->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(
getRegClassFor(MVT::i16));
@@ -772,7 +772,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
if (MF.getFunction().hasStructRetAttr()) {
MSP430MachineFunctionInfo *FuncInfo = MF.getInfo<MSP430MachineFunctionInfo>();
- unsigned Reg = FuncInfo->getSRetReturnReg();
+ Register Reg = FuncInfo->getSRetReturnReg();
if (!Reg)
llvm_unreachable("sret virtual register not created in entry block");
diff --git a/llvm/lib/Target/Mips/Mips16FrameLowering.cpp b/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
index 622f2039f9e41..4f4e3f3f2ed74 100644
--- a/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/llvm/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -74,7 +74,7 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF,
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned DReg = MRI->getDwarfRegNum(Reg, true);
unsigned CFIIndex = MF.addFrameInst(
MCCFIInstruction::createOffset(nullptr, DReg, Offset));
@@ -124,7 +124,7 @@ bool Mips16FrameLowering::spillCalleeSavedRegisters(
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA)
&& MF->getFrameInfo().isReturnAddressTaken();
if (!IsRAAndRetAddrIsTaken)
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 3403ec01aef2f..02d0e770ba66b 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -190,7 +190,7 @@ static void addSaveRestoreRegs(MachineInstrBuilder &MIB,
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = CSI[e-i-1].getReg();
+ Register Reg = CSI[e-i-1].getReg();
switch (Reg) {
case Mips::RA:
case Mips::S0:
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index ab32af3ba275a..4bd8845e9cb9f 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -337,7 +337,7 @@ void MipsAsmPrinter::printSavedRegsBitmask() {
unsigned CSFPRegsSize = 0;
for (const auto &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned RegNum = TRI->getEncodingValue(Reg);
// If it's a floating point register, set the FPU Bitmask.
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index 77d05abd3c93b..6ddfec5d0f791 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -313,7 +313,7 @@ unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
llvm_unreachable("unexpected opcode");
}
- unsigned LHSReg = getRegForValue(LHS);
+ Register LHSReg = getRegForValue(LHS);
if (!LHSReg)
return 0;
@@ -325,7 +325,7 @@ unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
if (!RHSReg)
return 0;
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return 0;
@@ -341,7 +341,7 @@ unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
ResultReg)
.addFrameIndex(SI->second)
@@ -362,7 +362,7 @@ unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
const TargetRegisterClass *RC) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (isInt<16>(Imm)) {
unsigned Opc = Mips::ADDiu;
@@ -376,7 +376,7 @@ unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
unsigned Hi = (Imm >> 16) & 0xFFFF;
if (Lo) {
// Both Lo and Hi have nonzero bits.
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
emitInst(Mips::LUi, TmpReg).addImm(Hi);
emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
} else {
@@ -391,13 +391,13 @@ unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
if (VT == MVT::f32) {
const TargetRegisterClass *RC = &Mips::FGR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
emitInst(Mips::MTC1, DestReg).addReg(TempReg);
return DestReg;
} else if (VT == MVT::f64) {
const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
unsigned TempReg2 =
materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
@@ -412,7 +412,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
if (VT != MVT::i32)
return 0;
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
bool IsThreadLocal = GVar && GVar->isThreadLocal();
// TLS not supported at this time.
@@ -423,7 +423,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
.addGlobalAddress(GV, 0, MipsII::MO_GOT);
if ((GV->hasInternalLinkage() ||
(GV->hasLocalLinkage() && !isa<Function>(GV)))) {
- unsigned TempReg = createResultReg(RC);
+ Register TempReg = createResultReg(RC);
emitInst(Mips::ADDiu, TempReg)
.addReg(DestReg)
.addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);
@@ -434,7 +434,7 @@ unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
const TargetRegisterClass *RC = &Mips::GPR32RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
emitInst(Mips::LW, DestReg)
.addReg(MFI->getGlobalBaseReg(*MF))
.addSym(Sym, MipsII::MO_GOT);
@@ -649,13 +649,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
default:
return false;
case CmpInst::ICMP_EQ: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_NE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
break;
@@ -667,13 +667,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
break;
case CmpInst::ICMP_UGE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_ULE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
@@ -685,13 +685,13 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
break;
case CmpInst::ICMP_SGE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
}
case CmpInst::ICMP_SLE: {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
break;
@@ -737,8 +737,8 @@ bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
default:
llvm_unreachable("Only switching of a subset of CCs.");
}
- unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
- unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
+ Register RegWithZero = createResultReg(&Mips::GPR32RegClass);
+ Register RegWithOne = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
@@ -964,7 +964,7 @@ bool MipsFastISel::selectBranch(const Instruction *I) {
// For the general case, we need to mask with 1.
if (ZExtCondReg == 0) {
- unsigned CondReg = getRegForValue(BI->getCondition());
+ Register CondReg = getRegForValue(BI->getCondition());
if (CondReg == 0)
return false;
@@ -982,7 +982,7 @@ bool MipsFastISel::selectBranch(const Instruction *I) {
bool MipsFastISel::selectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!emitCmp(ResultReg, CI))
return false;
updateValueMap(I, ResultReg);
@@ -1000,13 +1000,13 @@ bool MipsFastISel::selectFPExt(const Instruction *I) {
if (SrcVT != MVT::f32 || DestVT != MVT::f64)
return false;
- unsigned SrcReg =
+ Register SrcReg =
getRegForValue(Src); // this must be a 32bit floating point register class
// maybe we should handle this differently
if (!SrcReg)
return false;
- unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
+ Register DestReg = createResultReg(&Mips::AFGR64RegClass);
emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
updateValueMap(I, DestReg);
return true;
@@ -1041,22 +1041,22 @@ bool MipsFastISel::selectSelect(const Instruction *I) {
const SelectInst *SI = cast<SelectInst>(I);
const Value *Cond = SI->getCondition();
- unsigned Src1Reg = getRegForValue(SI->getTrueValue());
- unsigned Src2Reg = getRegForValue(SI->getFalseValue());
- unsigned CondReg = getRegForValue(Cond);
+ Register Src1Reg = getRegForValue(SI->getTrueValue());
+ Register Src2Reg = getRegForValue(SI->getFalseValue());
+ Register CondReg = getRegForValue(Cond);
if (!Src1Reg || !Src2Reg || !CondReg)
return false;
- unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
+ Register ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
if (!ZExtCondReg)
return false;
if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))
return false;
- unsigned ResultReg = createResultReg(RC);
- unsigned TempReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
+ Register TempReg = createResultReg(RC);
if (!ResultReg || !TempReg)
return false;
@@ -1079,11 +1079,11 @@ bool MipsFastISel::selectFPTrunc(const Instruction *I) {
if (SrcVT != MVT::f64 || DestVT != MVT::f32)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
- unsigned DestReg = createResultReg(&Mips::FGR32RegClass);
+ Register DestReg = createResultReg(&Mips::FGR32RegClass);
if (!DestReg)
return false;
@@ -1115,14 +1115,14 @@ bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
// Determine the opcode for the conversion, which takes place
// entirely within FPRs.
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
- unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::FGR32RegClass);
unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;
// Generate the convert.
@@ -1196,7 +1196,7 @@ bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
break;
}
}
- unsigned ArgReg = getRegForValue(ArgVal);
+ Register ArgReg = getRegForValue(ArgVal);
if (!ArgReg)
return false;
@@ -1294,7 +1294,7 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
CopyVT = MVT::i32;
- unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
+ Register ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));
if (!ResultReg)
return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
@@ -1462,11 +1462,11 @@ bool MipsFastISel::fastLowerArguments() {
for (const auto &FormalArg : F->args()) {
unsigned ArgNo = FormalArg.getArgNo();
unsigned SrcReg = Allocation[ArgNo].Reg;
- unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
+ Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[ArgNo].RC);
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
- unsigned ResultReg = createResultReg(Allocation[ArgNo].RC);
+ Register ResultReg = createResultReg(Allocation[ArgNo].RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(DstReg, getKillRegState(true));
@@ -1594,10 +1594,10 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!isTypeSupported(RetTy, VT))
return false;
- unsigned SrcReg = getRegForValue(II->getOperand(0));
+ Register SrcReg = getRegForValue(II->getOperand(0));
if (SrcReg == 0)
return false;
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
if (DestReg == 0)
return false;
if (VT == MVT::i16) {
@@ -1621,7 +1621,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
}
} else if (VT == MVT::i32) {
if (Subtarget->hasMips32r2()) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
updateValueMap(II, DestReg);
@@ -1720,7 +1720,7 @@ bool MipsFastISel::selectRet(const Instruction *I) {
if (!VA.isRegLoc())
return false;
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -1788,7 +1788,7 @@ bool MipsFastISel::selectTrunc(const Instruction *I) {
if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
return false;
- unsigned SrcReg = getRegForValue(Op);
+ Register SrcReg = getRegForValue(Op);
if (!SrcReg)
return false;
@@ -1804,7 +1804,7 @@ bool MipsFastISel::selectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool isZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1818,7 +1818,7 @@ bool MipsFastISel::selectIntExt(const Instruction *I) {
MVT SrcVT = SrcEVT.getSimpleVT();
MVT DestVT = DestEVT.getSimpleVT();
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
return false;
@@ -1839,7 +1839,7 @@ bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
ShiftAmt = 16;
break;
}
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
return true;
@@ -1935,15 +1935,15 @@ bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned Src0Reg = getRegForValue(I->getOperand(0));
- unsigned Src1Reg = getRegForValue(I->getOperand(1));
+ Register Src0Reg = getRegForValue(I->getOperand(0));
+ Register Src1Reg = getRegForValue(I->getOperand(1));
if (!Src0Reg || !Src1Reg)
return false;
emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return false;
@@ -1962,19 +1962,19 @@ bool MipsFastISel::selectShift(const Instruction *I) {
if (!isTypeSupported(I->getType(), RetVT))
return false;
- unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+ Register ResultReg = createResultReg(&Mips::GPR32RegClass);
if (!ResultReg)
return false;
unsigned Opcode = I->getOpcode();
const Value *Op0 = I->getOperand(0);
- unsigned Op0Reg = getRegForValue(Op0);
+ Register Op0Reg = getRegForValue(Op0);
if (!Op0Reg)
return false;
// If AShr or LShr, then we need to make sure the operand0 is sign extended.
if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
if (!TempReg)
return false;
@@ -2008,7 +2008,7 @@ bool MipsFastISel::selectShift(const Instruction *I) {
return true;
}
- unsigned Op1Reg = getRegForValue(I->getOperand(1));
+ Register Op1Reg = getRegForValue(I->getOperand(1));
if (!Op1Reg)
return false;
@@ -2091,7 +2091,7 @@ bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
bool IsUnsigned) {
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
@@ -2100,7 +2100,7 @@ unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
return 0;
if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
- unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
+ Register TempReg = createResultReg(&Mips::GPR32RegClass);
if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))
return 0;
VReg = TempReg;
@@ -2112,7 +2112,7 @@ void MipsFastISel::simplifyAddress(Address &Addr) {
if (!isInt<16>(Addr.getOffset())) {
unsigned TempReg =
materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
- unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
+ Register DestReg = createResultReg(&Mips::GPR32RegClass);
emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
Addr.setReg(DestReg);
Addr.setOffset(0);
@@ -2129,7 +2129,7 @@ unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
// followed by another instruction that defines the same registers too.
// We can fix this by explicitly marking those registers as dead.
if (MachineInstOpcode == Mips::MUL) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 8ef0de95fb5e7..0c2e129b8f1fc 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2523,7 +2523,7 @@ SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
MFI.setReturnAddressIsTaken(true);
// Return RA, which contains the return address. Mark it an implicit live-in.
- unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
+ Register Reg = MF.addLiveIn(RA, getRegClassFor(VT));
return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
}
diff --git a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index 193d071447ff3..7ee2ddf3605ff 100644
--- a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -454,7 +454,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
// directives.
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// If Reg is a double precision register, emit two cfa_offsets,
// one for each of the paired single precision registers.
@@ -801,7 +801,7 @@ bool MipsSEFrameLowering::spillCalleeSavedRegisters(
// method MipsTargetLowering::lowerRETURNADDR.
// It's killed at the spill, unless the register is RA and return address
// is taken.
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
&& MF->getFrameInfo().isReturnAddressTaken();
if (!IsRAAndRetAddrIsTaken)
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 1b1ac437c735c..96ea35bd86cd8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1613,7 +1613,7 @@ void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
// We use the per class virtual register number in the ptx output.
unsigned int numVRs = MRI->getNumVirtRegs();
for (unsigned i = 0; i < numVRs; i++) {
- unsigned int vr = Register::index2VirtReg(i);
+ Register vr = Register::index2VirtReg(i);
const TargetRegisterClass *RC = MRI->getRegClass(vr);
DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
int n = regmap.size();
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index bfa9234630da2..e7cd107c50469 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -150,7 +150,7 @@ class PPCFastISel final : public FastISel {
unsigned copyRegToRegClass(const TargetRegisterClass *ToRC,
unsigned SrcReg, unsigned Flag = 0,
unsigned SubReg = 0) {
- unsigned TmpReg = createResultReg(ToRC);
+ Register TmpReg = createResultReg(ToRC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg);
return TmpReg;
@@ -428,7 +428,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, bool &UseOffset,
// put the alloca address into a register, set the base type back to
// register and continue. This should almost never happen.
if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
- unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
Addr.Base.Reg = ResultReg;
@@ -604,7 +604,7 @@ bool PPCFastISel::SelectLoad(const Instruction *I) {
// Look at the currently assigned register for this instruction
// to determine the required register class. This is necessary
// to constrain RA from using R0/X0 when this is not legal.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
@@ -783,7 +783,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
PPCPred = PPC::InvertPredicate(PPCPred);
}
- unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
+ Register CondReg = createResultReg(&PPC::CRRCRegClass);
if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
CondReg, PPCPred))
@@ -847,7 +847,7 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
}
- unsigned SrcReg1 = getRegForValue(SrcValue1);
+ Register SrcReg1 = getRegForValue(SrcValue1);
if (SrcReg1 == 0)
return false;
@@ -928,13 +928,13 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
if (NeedsExt) {
- unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ Register ExtReg = createResultReg(&PPC::GPRCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
return false;
SrcReg1 = ExtReg;
if (!UseImm) {
- unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
+ Register ExtReg = createResultReg(&PPC::GPRCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
return false;
SrcReg2 = ExtReg;
@@ -960,7 +960,7 @@ bool PPCFastISel::SelectFPExt(const Instruction *I) {
if (SrcVT != MVT::f32 || DestVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -978,7 +978,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
if (SrcVT != MVT::f64 || DestVT != MVT::f32)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1019,7 +1019,7 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
// If necessary, extend 32-bit int to 64-bit.
if (SrcVT == MVT::i32) {
- unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ Register TmpReg = createResultReg(&PPC::G8RCRegClass);
if (!PPCEmitIntExt(MVT::i32, SrcReg, MVT::i64, TmpReg, !IsSigned))
return 0;
SrcReg = TmpReg;
@@ -1079,7 +1079,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
SrcVT != MVT::i32 && SrcVT != MVT::i64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
@@ -1091,7 +1091,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
else
Opc = IsSigned ? PPC::EFDCFSI : PPC::EFDCFUI;
- unsigned DestReg = createResultReg(&PPC::SPERCRegClass);
+ Register DestReg = createResultReg(&PPC::SPERCRegClass);
// Generate the convert.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
@@ -1114,7 +1114,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// Extend the input if necessary.
if (SrcVT == MVT::i8 || SrcVT == MVT::i16) {
- unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
+ Register TmpReg = createResultReg(&PPC::G8RCRegClass);
if (!PPCEmitIntExt(SrcVT, SrcReg, MVT::i64, TmpReg, !IsSigned))
return false;
SrcVT = MVT::i64;
@@ -1128,7 +1128,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
// Determine the opcode for the conversion.
const TargetRegisterClass *RC = &PPC::F8RCRegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
unsigned Opc;
if (DstVT == MVT::f32)
@@ -1170,7 +1170,7 @@ unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
// Look at the currently assigned register for this instruction
// to determine the required register class.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
AssignedReg ? MRI.getRegClass(AssignedReg) : nullptr;
@@ -1206,7 +1206,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (SrcReg == 0)
return false;
@@ -1276,7 +1276,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
// Look at the currently assigned register for this instruction
// to determine the required register class. If there is no register,
// make a conservative choice (don't assign R0).
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
(AssignedReg ? MRI.getRegClass(AssignedReg) :
&PPC::GPRC_and_GPRC_NOR0RegClass);
@@ -1296,8 +1296,8 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
break;
}
- unsigned ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
- unsigned SrcReg1 = getRegForValue(I->getOperand(0));
+ Register ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
+ Register SrcReg1 = getRegForValue(I->getOperand(0));
if (SrcReg1 == 0) return false;
// Handle case of small immediate operand.
@@ -1355,7 +1355,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
}
// Reg-reg case.
- unsigned SrcReg2 = getRegForValue(I->getOperand(1));
+ Register SrcReg2 = getRegForValue(I->getOperand(1));
if (SrcReg2 == 0) return false;
// Reverse operands for subtract-from.
@@ -1441,7 +1441,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
MVT DestVT = VA.getLocVT();
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/false))
llvm_unreachable("Failed to emit a sext!");
ArgVT = DestVT;
@@ -1453,7 +1453,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
MVT DestVT = VA.getLocVT();
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/true))
llvm_unreachable("Failed to emit a zext!");
ArgVT = DestVT;
@@ -1628,7 +1628,7 @@ bool PPCFastISel::fastLowerCall(CallLoweringInfo &CLI) {
if (ArgVT.isVector() || ArgVT == MVT::f128)
return false;
- unsigned Arg = getRegForValue(ArgValue);
+ Register Arg = getRegForValue(ArgValue);
if (Arg == 0)
return false;
@@ -1734,7 +1734,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
RetRegs.push_back(RetReg);
} else {
- unsigned Reg = getRegForValue(RV);
+ Register Reg = getRegForValue(RV);
if (Reg == 0)
return false;
@@ -1767,7 +1767,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
case CCValAssign::ZExt: {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
return false;
SrcReg = TmpReg;
@@ -1776,7 +1776,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
case CCValAssign::SExt: {
const TargetRegisterClass *RC =
(DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
return false;
SrcReg = TmpReg;
@@ -1857,7 +1857,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
// Attempt to fast-select an indirect branch instruction.
bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
- unsigned AddrReg = getRegForValue(I->getOperand(0));
+ Register AddrReg = getRegForValue(I->getOperand(0));
if (AddrReg == 0)
return false;
@@ -1884,7 +1884,7 @@ bool PPCFastISel::SelectTrunc(const Instruction *I) {
if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
return false;
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg)
return false;
@@ -1903,7 +1903,7 @@ bool PPCFastISel::SelectIntExt(const Instruction *I) {
Type *SrcTy = Src->getType();
bool IsZExt = isa<ZExtInst>(I);
- unsigned SrcReg = getRegForValue(Src);
+ Register SrcReg = getRegForValue(Src);
if (!SrcReg) return false;
EVT SrcEVT, DestEVT;
@@ -1921,12 +1921,12 @@ bool PPCFastISel::SelectIntExt(const Instruction *I) {
// instruction, use it. Otherwise pick the register class of the
// correct size that does not contain X0/R0, since we don't know
// whether downstream uses permit that assignment.
- unsigned AssignedReg = FuncInfo.ValueMap[I];
+ Register AssignedReg = FuncInfo.ValueMap[I];
const TargetRegisterClass *RC =
(AssignedReg ? MRI.getRegClass(AssignedReg) :
(DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
&PPC::GPRC_and_GPRC_NOR0RegClass));
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
return false;
@@ -2003,7 +2003,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
else
RC = ((VT == MVT::f32) ? &PPC::F4RCRegClass : &PPC::F8RCRegClass);
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
CodeModel::Model CModel = TM.getCodeModel();
MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
@@ -2017,7 +2017,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
else
Opc = ((VT == MVT::f32) ? PPC::LFS : PPC::LFD);
- unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
PPCFuncInfo->setUsesTOCBasePtr();
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
@@ -2034,7 +2034,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
// But for large code model, we must generate a LDtocL followed
// by the LF[SD].
if (CModel == CodeModel::Large) {
- unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
@@ -2059,7 +2059,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
assert(VT == MVT::i64 && "Non-address!");
const TargetRegisterClass *RC = &PPC::G8RC_and_G8RC_NOX0RegClass;
- unsigned DestReg = createResultReg(RC);
+ Register DestReg = createResultReg(RC);
// Global values may be plain old object addresses, TLS object
// addresses, constant pool entries, or jump tables. How we generate
@@ -2096,7 +2096,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// Otherwise we generate:
// ADDItocL(ADDIStocHA8(%x2, GV), GV)
// Either way, start with the ADDIStocHA8:
- unsigned HighPartReg = createResultReg(RC);
+ Register HighPartReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA8),
HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
@@ -2120,7 +2120,7 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
unsigned Lo = Imm & 0xFFFF;
unsigned Hi = (Imm >> 16) & 0xFFFF;
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
if (isInt<16>(Imm))
@@ -2129,7 +2129,7 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
.addImm(Imm);
else if (Lo) {
// Both Lo and Hi have nonzero bits.
- unsigned TmpReg = createResultReg(RC);
+ Register TmpReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
.addImm(Hi);
@@ -2192,7 +2192,7 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
TmpReg3 = TmpReg2;
if ((Lo = Remainder & 0xFFFF)) {
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8),
ResultReg).addReg(TmpReg3).addImm(Lo);
return ResultReg;
@@ -2208,7 +2208,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
// If we're using CR bit registers for i1 values, handle that as a special
// case first.
if (VT == MVT::i1 && Subtarget->useCRBits()) {
- unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ Register ImmReg = createResultReg(&PPC::CRBITRCRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(CI->isZero() ? PPC::CRUNSET : PPC::CRSET), ImmReg);
return ImmReg;
@@ -2228,7 +2228,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const ConstantInt *CI, MVT VT,
// a range of 0..0x7fff.
if (isInt<16>(Imm)) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
- unsigned ImmReg = createResultReg(RC);
+ Register ImmReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(Imm);
return ImmReg;
@@ -2280,7 +2280,7 @@ unsigned PPCFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
+ Register ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(SI->second).addImm(0);
return ResultReg;
@@ -2390,7 +2390,7 @@ unsigned PPCFastISel::fastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
// If we're using CR bit registers for i1 values, handle that as a special
// case first.
if (VT == MVT::i1 && Subtarget->useCRBits()) {
- unsigned ImmReg = createResultReg(&PPC::CRBITRCRegClass);
+ Register ImmReg = createResultReg(&PPC::CRBITRCRegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Imm == 0 ? PPC::CRUNSET : PPC::CRSET), ImmReg);
return ImmReg;
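The FastISel hunks above are representative of the whole patch: the substitution stays NFC because llvm::Register converts implicitly to and from unsigned, so the existing zero checks (if (SrcReg == 0), if (!SrcReg)) and every helper that still takes a plain unsigned keep compiling unchanged. Below is a minimal standalone sketch of that contract; it is a mock of the relevant pieces, not the real llvm/CodeGen/Register.h.

#include <cassert>

// Mock of llvm::Register's conversion contract (illustrative only).
class Register {
  unsigned Reg;
public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}   // implicit from unsigned
  constexpr operator unsigned() const { return Reg; }  // implicit to unsigned
  constexpr bool isValid() const { return Reg != 0; }
};

// A helper that still takes a plain unsigned keeps working unchanged.
unsigned nextReg(unsigned R) { return R + 1; }

int main() {
  Register SrcReg = 42;            // constructed exactly like the old unsigned
  if (SrcReg == 0) return 1;       // the "if (SrcReg == 0) return false;" shape
  if (!SrcReg) return 1;           // and the "if (!SrcReg)" shape
  assert(nextReg(SrcReg) == 43);   // converts back to unsigned at the call
  return SrcReg.isValid() ? 0 : 1;
}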
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 1d195528c4c02..65c969c196e1d 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1173,7 +1173,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// CFA.
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
// This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
@@ -1196,7 +1196,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF,
// In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
// the whole CR word. In the ELFv2 ABI, every CR that was
// actually saved gets its own CFI record.
- unsigned CRReg = isELFv2ABI? Reg : (unsigned) PPC::CR2;
+ Register CRReg = isELFv2ABI? Reg : PPC::CR2;
unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
@@ -2087,7 +2087,7 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
SmallVector<CalleeSavedInfo, 18> VRegs;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
(Reg != PPC::X2 && Reg != PPC::R2)) &&
"Not expecting to try to spill R2 in a function that must save TOC");
@@ -2339,7 +2339,7 @@ bool PPCFrameLowering::assignCalleeSavedSpillSlots(
if (BVAllocatable.none())
return false;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
if (!PPC::G8RCRegClass.contains(Reg)) {
AllSpilledToReg = false;
@@ -2397,7 +2397,7 @@ bool PPCFrameLowering::spillCalleeSavedRegisters(
});
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
// CR2 through CR4 are the nonvolatile CR fields.
bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;
@@ -2583,7 +2583,7 @@ bool PPCFrameLowering::restoreCalleeSavedRegisters(
--BeforeI;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
+ Register Reg = CSI[i].getReg();
if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
continue;
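One hunk in this file is slightly more than a type rename: in emitPrologue the explicit (unsigned) cast on PPC::CR2 disappears. With Reg now a Register, the enumerator converts to Register through the implicit constructor, giving the conditional expression a common type without the cast. A self-contained sketch with mock types; PPCReg and its CR2 value are made up for illustration.

// Mock Register (same shape as the sketch earlier in this mail).
class Register {
  unsigned R;
public:
  constexpr Register(unsigned V = 0) : R(V) {}
  constexpr operator unsigned() const { return R; }
};

enum PPCReg : unsigned { CR2 = 30 }; // hypothetical stand-in for PPC::CR2

Register pickCRReg(bool IsELFv2ABI, Register Reg) {
  // Previously: unsigned CRReg = IsELFv2ABI ? Reg : (unsigned)PPC::CR2;
  // Only the enum-to-Register direction is implicit here, so the ternary
  // resolves to Register and the cast is no longer needed.
  return IsELFv2ABI ? Reg : CR2;
}

int main() { return pickCRReg(false, Register(5)) == CR2 ? 0 : 1; }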
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index d6c57d94e4120..3f68a9b0fdd90 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -4078,8 +4078,8 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// virtual ones.
if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
assert(i + 1 < e && "No second half of double precision argument");
- unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
- unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
+ Register RegLo = MF.addLiveIn(VA.getLocReg(), RC);
+ Register RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
if (!Subtarget.isLittleEndian())
@@ -4087,7 +4087,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
ArgValueHi);
} else {
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
ValVT == MVT::i1 ? MVT::i32 : ValVT);
if (ValVT == MVT::i1)
@@ -4179,7 +4179,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// dereferencing the result of va_next.
for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
@@ -4198,7 +4198,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
// on the stack.
for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
// Get an existing live-in vreg, or add a new one.
- unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
+ Register VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
if (!VReg)
VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
@@ -4384,7 +4384,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
InVals.push_back(Arg);
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
EVT ObjType = EVT::getIntegerVT(*DAG.getContext(), ObjSize * 8);
@@ -4408,7 +4408,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
if (GPR_idx == Num_GPR_Regs)
break;
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Addr = FIN;
@@ -4432,7 +4432,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
case MVT::i64:
if (Flags.isNest()) {
// The 'nest' parameter, if any, is passed in R11.
- unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
@@ -4445,7 +4445,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. Clang may use those instead of "byval" aggregate
// types to avoid forcing arguments to memory unnecessarily.
if (GPR_idx != Num_GPR_Regs) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4491,7 +4491,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// This can only ever happen in the presence of f32 array types,
// since otherwise we never run out of FPRs before running out
// of GPRs.
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
FuncInfo->addLiveInAttr(VReg, Flags);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
@@ -4532,7 +4532,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// passed directly. The latter are used to implement ELFv2 homogenous
// vector aggregates.
if (VR_idx != Num_VR_Regs) {
- unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
+ Register VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++VR_idx;
} else {
@@ -4591,7 +4591,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
// the result of va_next.
for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
GPR_idx < Num_GPR_Regs; ++GPR_idx) {
- unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
+ Register VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
@@ -7059,7 +7059,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
unsigned Offset) {
- const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
+ const Register VReg = MF.addLiveIn(PhysReg, RegClass);
// Since the callers side has left justified the aggregate in the
// register, we can simply store the entire register into the stack
// slot.
@@ -7156,7 +7156,7 @@ SDValue PPCTargetLowering::LowerFormalArguments_AIX(
(CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
GPRIndex < NumGPArgRegs; ++GPRIndex) {
- const unsigned VReg =
+ const Register VReg =
IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
: MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 61a6ed9e34384..7edec82c6e067 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -3253,7 +3253,7 @@ MachineInstr *PPCInstrInfo::getForwardingDefMI(
Register Reg = MI.getOperand(i).getReg();
if (!Register::isVirtualRegister(Reg))
continue;
- unsigned TrueReg = TRI->lookThruCopyLike(Reg, MRI);
+ Register TrueReg = TRI->lookThruCopyLike(Reg, MRI);
if (Register::isVirtualRegister(TrueReg)) {
DefMI = MRI->getVRegDef(TrueReg);
if (DefMI->getOpcode() == PPC::LI || DefMI->getOpcode() == PPC::LI8 ||
@@ -3502,8 +3502,8 @@ bool PPCInstrInfo::foldFrameOffset(MachineInstr &MI) const {
return false;
assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
- unsigned ToBeChangedReg = ADDIMI->getOperand(0).getReg();
- unsigned ScaleReg = ADDMI->getOperand(ScaleRegIdx).getReg();
+ Register ToBeChangedReg = ADDIMI->getOperand(0).getReg();
+ Register ScaleReg = ADDMI->getOperand(ScaleRegIdx).getReg();
auto NewDefFor = [&](unsigned Reg, MachineBasicBlock::iterator Start,
MachineBasicBlock::iterator End) {
for (auto It = ++Start; It != End; It++)
@@ -3720,7 +3720,7 @@ bool PPCInstrInfo::convertToImmediateForm(MachineInstr &MI,
bool PPCInstrInfo::combineRLWINM(MachineInstr &MI,
MachineInstr **ToErase) const {
MachineRegisterInfo *MRI = &MI.getParent()->getParent()->getRegInfo();
- unsigned FoldingReg = MI.getOperand(1).getReg();
+ Register FoldingReg = MI.getOperand(1).getReg();
if (!Register::isVirtualRegister(FoldingReg))
return false;
MachineInstr *SrcMI = MRI->getVRegDef(FoldingReg);
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index 7d8aee75cbe22..0c920582843ad 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -481,7 +481,7 @@ bool PPCMIPeephole::simplifyCode() {
// PPC::ZERO.
if (!MI.getOperand(1).isImm() || MI.getOperand(1).getImm() != 0)
break;
- unsigned MIDestReg = MI.getOperand(0).getReg();
+ Register MIDestReg = MI.getOperand(0).getReg();
for (MachineInstr& UseMI : MRI->use_instructions(MIDestReg))
Simplified |= TII->onlyFoldImmediate(UseMI, MI, MIDestReg);
if (MRI->use_nodbg_empty(MIDestReg)) {
@@ -519,9 +519,9 @@ bool PPCMIPeephole::simplifyCode() {
// XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), immed.
// We have to look through chains of COPY and SUBREG_TO_REG
// to find the real source values for comparison.
- unsigned TrueReg1 =
+ Register TrueReg1 =
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
- unsigned TrueReg2 =
+ Register TrueReg2 =
TRI->lookThruCopyLike(MI.getOperand(2).getReg(), MRI);
if (!(TrueReg1 == TrueReg2 && Register::isVirtualRegister(TrueReg1)))
@@ -541,7 +541,7 @@ bool PPCMIPeephole::simplifyCode() {
auto isConversionOfLoadAndSplat = [=]() -> bool {
if (DefOpc != PPC::XVCVDPSXDS && DefOpc != PPC::XVCVDPUXDS)
return false;
- unsigned FeedReg1 =
+ Register FeedReg1 =
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
if (Register::isVirtualRegister(FeedReg1)) {
MachineInstr *LoadMI = MRI->getVRegDef(FeedReg1);
@@ -565,16 +565,16 @@ bool PPCMIPeephole::simplifyCode() {
// If this is a splat or a swap fed by another splat, we
// can replace it with a copy.
if (DefOpc == PPC::XXPERMDI) {
- unsigned DefReg1 = DefMI->getOperand(1).getReg();
- unsigned DefReg2 = DefMI->getOperand(2).getReg();
+ Register DefReg1 = DefMI->getOperand(1).getReg();
+ Register DefReg2 = DefMI->getOperand(2).getReg();
unsigned DefImmed = DefMI->getOperand(3).getImm();
// If the two inputs are not the same register, check to see if
// they originate from the same virtual register after only
// copy-like instructions.
if (DefReg1 != DefReg2) {
- unsigned FeedReg1 = TRI->lookThruCopyLike(DefReg1, MRI);
- unsigned FeedReg2 = TRI->lookThruCopyLike(DefReg2, MRI);
+ Register FeedReg1 = TRI->lookThruCopyLike(DefReg1, MRI);
+ Register FeedReg2 = TRI->lookThruCopyLike(DefReg2, MRI);
if (!(FeedReg1 == FeedReg2 &&
Register::isVirtualRegister(FeedReg1)))
@@ -643,7 +643,7 @@ bool PPCMIPeephole::simplifyCode() {
case PPC::XXSPLTW: {
unsigned MyOpcode = MI.getOpcode();
unsigned OpNo = MyOpcode == PPC::XXSPLTW ? 1 : 2;
- unsigned TrueReg =
+ Register TrueReg =
TRI->lookThruCopyLike(MI.getOperand(OpNo).getReg(), MRI);
if (!Register::isVirtualRegister(TrueReg))
break;
@@ -707,7 +707,7 @@ bool PPCMIPeephole::simplifyCode() {
}
case PPC::XVCVDPSP: {
// If this is a DP->SP conversion fed by an FRSP, the FRSP is redundant.
- unsigned TrueReg =
+ Register TrueReg =
TRI->lookThruCopyLike(MI.getOperand(1).getReg(), MRI);
if (!Register::isVirtualRegister(TrueReg))
break;
@@ -716,9 +716,9 @@ bool PPCMIPeephole::simplifyCode() {
// This can occur when building a vector of single precision or integer
// values.
if (DefMI && DefMI->getOpcode() == PPC::XXPERMDI) {
- unsigned DefsReg1 =
+ Register DefsReg1 =
TRI->lookThruCopyLike(DefMI->getOperand(1).getReg(), MRI);
- unsigned DefsReg2 =
+ Register DefsReg2 =
TRI->lookThruCopyLike(DefMI->getOperand(2).getReg(), MRI);
if (!Register::isVirtualRegister(DefsReg1) ||
!Register::isVirtualRegister(DefsReg2))
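The PPCInstrInfo and PPCMIPeephole hunks all follow one shape: a register recovered through lookThruCopyLike is tested with the static Register::isVirtualRegister before its defining instruction is looked up. The static helper takes a plain unsigned, which is why both the old and new spellings type-check. A mock sketch of that shape; the top-bit tag matches my reading of Register.h at this revision and should be treated as an assumption.

#include <cassert>

class Register {
  unsigned R;
public:
  static constexpr unsigned VirtualRegFlag = 1u << 31; // assumed encoding
  constexpr Register(unsigned V = 0) : R(V) {}
  constexpr operator unsigned() const { return R; }
  // Static form: accepts unsigned or Register (via implicit conversion).
  static constexpr bool isVirtualRegister(unsigned Reg) {
    return (Reg & VirtualRegFlag) != 0;
  }
  constexpr bool isVirtual() const { return isVirtualRegister(R); }
};

int main() {
  Register TrueReg = Register::VirtualRegFlag | 7; // a made-up virtual reg
  assert(Register::isVirtualRegister(TrueReg));    // old-style static check
  assert(TrueReg.isVirtual());                     // member form, same answer
  return 0;
}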
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 4bccc5596d2b8..422b3db4f978a 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -423,7 +423,7 @@ bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) co
continue;
int FrIdx = Info[i].getFrameIdx();
- unsigned Reg = Info[i].getReg();
+ Register Reg = Info[i].getReg();
const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 0bcc156a50170..a4e752b7e8839 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -915,7 +915,7 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
.addMBB(&DestBB, RISCVII::MO_CALL);
RS->enterBasicBlockEnd(MBB);
- unsigned Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
+ Register Scav = RS->scavengeRegisterBackwards(RISCV::GPRRegClass,
MI.getIterator(), false, 0);
// TODO: The case when there is no scavenged register needs special handling.
assert(Scav != RISCV::NoRegister && "No register is scavenged!");
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index dd084f53e5116..c167c095521a7 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -172,7 +172,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
default:
llvm_unreachable("Unknown operand type");
case MachineOperand::MO_Register: {
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
if (RISCV::VRM2RegClass.contains(Reg) ||
RISCV::VRM4RegClass.contains(Reg) ||
diff --git a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index ed380d309bd71..c5d0f1de7dfd9 100644
--- a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -66,7 +66,7 @@ class SparcDAGToDAGISel : public SelectionDAGISel {
} // end anonymous namespace
SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
- unsigned GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
+ Register GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
return CurDAG->getRegister(GlobalBaseReg,
TLI->getPointerTy(CurDAG->getDataLayout()))
.getNode();
@@ -220,8 +220,8 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
SDValue V0 = N->getOperand(i+1);
SDValue V1 = N->getOperand(i+2);
- unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
- unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
+ Register Reg0 = cast<RegisterSDNode>(V0)->getReg();
+ Register Reg1 = cast<RegisterSDNode>(V1)->getReg();
SDValue PairedReg;
MachineRegisterInfo &MRI = MF->getRegInfo();
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 20deabd233a9f..52b120031e3f0 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2684,7 +2684,7 @@ static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
SDValue RetAddr;
if (depth == 0) {
auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
- unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
+ Register RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
return RetAddr;
}
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index 99ab4c5455d67..1adc8994e0f4d 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -103,7 +103,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
unsigned HighGPR = SystemZ::R15D;
int StartSPOffset = SystemZMC::ELFCallFrameSize;
for (auto &CS : CSI) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
int Offset = getRegSpillOffset(MF, Reg);
if (Offset) {
if (SystemZ::GR64BitRegClass.contains(Reg) && StartSPOffset > Offset) {
@@ -124,7 +124,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
// Also save the GPR varargs, if any. R6D is call-saved, so would
// already be included, but we also need to handle the call-clobbered
// argument registers.
- unsigned FirstGPR = ZFI->getVarArgsFirstGPR();
+ Register FirstGPR = ZFI->getVarArgsFirstGPR();
if (FirstGPR < SystemZ::ELFNumArgGPRs) {
unsigned Reg = SystemZ::ELFArgGPRs[FirstGPR];
int Offset = getRegSpillOffset(MF, Reg);
@@ -143,7 +143,7 @@ bool SystemZELFFrameLowering::assignCalleeSavedSpillSlots(
for (auto &CS : CSI) {
if (CS.getFrameIdx() != INT32_MAX)
continue;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
unsigned Size = TRI->getSpillSize(*RC);
CurrOffset -= Size;
@@ -271,7 +271,7 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
// Make sure all call-saved GPRs are included as operands and are
// marked as live on entry.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg))
addSavedGPR(MBB, MIB, Reg, true);
}
@@ -284,7 +284,7 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
// Save FPRs/VRs in the normal TargetInstrInfo way.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
@@ -314,7 +314,7 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
// Restore FPRs/VRs in the normal TargetInstrInfo way.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
&SystemZ::FP64BitRegClass, TRI);
@@ -346,7 +346,7 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
// Do a second scan adding regs as being defined by instruction
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (Reg != RestoreGPRs.LowGPR && Reg != RestoreGPRs.HighGPR &&
SystemZ::GR64BitRegClass.contains(Reg))
MIB.addReg(Reg, RegState::ImplicitDefine);
@@ -500,7 +500,7 @@ void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
// Add CFI for the GPR saves.
for (auto &Save : CSI) {
- unsigned Reg = Save.getReg();
+ Register Reg = Save.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
int FI = Save.getFrameIdx();
int64_t Offset = MFFrame.getObjectOffset(FI);
@@ -580,7 +580,7 @@ void SystemZELFFrameLowering::emitPrologue(MachineFunction &MF,
// Skip over the FPR/VR saves.
SmallVector<unsigned, 8> CFIIndexes;
for (auto &Save : CSI) {
- unsigned Reg = Save.getReg();
+ Register Reg = Save.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
if (MBBI != MBB.end() &&
(MBBI->getOpcode() == SystemZ::STD ||
@@ -850,7 +850,7 @@ bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
auto ProcessCSI = [&](std::vector<CalleeSavedInfo> &CSIList) {
for (auto &CS : CSIList) {
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
int Offset = RegSpillOffsets[Reg];
if (Offset >= 0) {
if (GRRegClass.contains(Reg)) {
@@ -895,7 +895,7 @@ bool SystemZXPLINKFrameLowering::assignCalleeSavedSpillSlots(
for (auto &CS : CSI) {
if (CS.getFrameIdx() != INT32_MAX)
continue;
- unsigned Reg = CS.getReg();
+ Register Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
Align Alignment = TRI->getSpillAlign(*RC);
unsigned Size = TRI->getSpillSize(*RC);
@@ -966,7 +966,7 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
// marked as live on entry.
auto &GRRegClass = SystemZ::GR64BitRegClass;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (GRRegClass.contains(Reg))
addSavedGPR(MBB, MIB, Reg, true);
}
@@ -974,7 +974,7 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
// Spill FPRs to the stack in the normal TargetInstrInfo way
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
@@ -1007,7 +1007,7 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
// Restore FPRs in the normal TargetInstrInfo way.
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- unsigned Reg = CSI[I].getReg();
+ Register Reg = CSI[I].getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
&SystemZ::FP64BitRegClass, TRI);
@@ -1041,7 +1041,7 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
// Do a second scan adding regs as being defined by instruction
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
- unsigned Reg = CSI[I].getReg();
+ Register Reg = CSI[I].getReg();
if (Reg > RestoreGPRs.LowGPR && Reg < RestoreGPRs.HighGPR)
MIB.addReg(Reg, RegState::ImplicitDefine);
}
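The frame-lowering changes in this patch (PPC, SystemZ, X86, XCore) share the same loop: pull each CalleeSavedInfo entry's register, then run register-class contains()-style queries on it. Holding the value in Register documents that it is a physical register number, while queries that still take unsigned behave identically. A runnable sketch with hypothetical stand-ins for CalleeSavedInfo and the class query.

#include <cstdio>
#include <vector>

class Register {
  unsigned R;
public:
  constexpr Register(unsigned V = 0) : R(V) {}
  constexpr operator unsigned() const { return R; }
};

// Hypothetical stand-in for llvm::CalleeSavedInfo.
struct CalleeSavedInfo {
  unsigned Reg;
  unsigned getReg() const { return Reg; }
};

// Hypothetical stand-in for a register-class contains() query.
bool isGR64(unsigned Reg) { return Reg >= 16 && Reg < 32; }

int countGR64Saves(const std::vector<CalleeSavedInfo> &CSI) {
  int N = 0;
  for (const CalleeSavedInfo &I : CSI) {
    Register Reg = I.getReg(); // unsigned -> Register, as in the hunks above
    if (isGR64(Reg))           // Register -> unsigned at the query boundary
      ++N;
  }
  return N;
}

int main() {
  std::vector<CalleeSavedInfo> CSI = {{17}, {3}, {20}};
  std::printf("%d\n", countGR64Saves(CSI)); // prints 2
  return 0;
}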
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 24de528507713..e1549f3012f95 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1571,7 +1571,7 @@ SDValue SystemZTargetLowering::LowerFormalArguments(
int FI =
MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset, true);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
- unsigned VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I],
+ Register VReg = MF.addLiveIn(SystemZ::ELFArgFPRs[I],
&SystemZ::FP64BitRegClass);
SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
@@ -3417,7 +3417,7 @@ SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
}
// Return R14D, which has the return address. Mark it an implicit live-in.
- unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
+ Register LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
}
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index e80496e37781c..6db9bf3056b7f 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1309,7 +1309,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
// allocated regs are in an FP reg-class per previous check above.
for (const MachineOperand &MO : MIB->operands())
if (MO.isReg() && Register::isVirtualRegister(MO.getReg())) {
- unsigned Reg = MO.getReg();
+ Register Reg = MO.getReg();
if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index f513b130a1e67..abedee4788d9d 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -420,7 +420,7 @@ SDValue VETargetLowering::LowerFormalArguments(
// All integer register arguments are promoted by the caller to i64.
// Create a virtual register for the promoted live-in value.
- unsigned VReg =
+ Register VReg =
MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT()));
SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
@@ -755,7 +755,7 @@ SDValue VETargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(!VA.needsCustom() && "Unexpected custom lowering");
- unsigned Reg = VA.getLocReg();
+ Register Reg = VA.getLocReg();
// When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
// reside in the same register in the high and low bits. Reuse the
@@ -1546,7 +1546,7 @@ static SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
unsigned Depth = Op.getConstantOperandVal(0);
const VERegisterInfo *RegInfo = Subtarget->getRegisterInfo();
- unsigned FrameReg = RegInfo->getFrameRegister(MF);
+ Register FrameReg = RegInfo->getFrameRegister(MF);
SDValue FrameAddr =
DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, PtrVT);
while (Depth--)
diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index 46846edfeafba..7c1bd52018672 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -248,7 +248,7 @@ unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineFunction *MF = MBB.getParent();
const MachineRegisterInfo &MRI = MF->getRegInfo();
- unsigned Reg = Cond[2].getReg();
+ Register Reg = Cond[2].getReg();
if (IsIntegerCC(Cond[0].getImm())) {
if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
opc[0] = VE::BRCFWir;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
index 910a4e5e0d1ac..eeec0fc671cce 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -406,7 +406,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
// TODO: Sort the locals for better compression.
MFI.setNumLocals(CurLocal - MFI.getParams().size());
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto RL = Reg2Local.find(Reg);
if (RL == Reg2Local.end() || RL->second < MFI.getParams().size())
continue;
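The WebAssembly passes iterate virtual registers by dense index and convert with Register::index2VirtReg before querying MRI. As I read Register.h around this revision, a virtual register is its index with the top bit (an internal VirtualRegFlag) set, making index2VirtReg and virtReg2Index a tag-and-mask pair; the mock below reproduces that convention and is an assumption about the encoding, not the exact header.

#include <cassert>

class Register {
  unsigned R;
public:
  static constexpr unsigned VirtualRegFlag = 1u << 31; // assumed tag bit
  constexpr Register(unsigned V = 0) : R(V) {}
  constexpr operator unsigned() const { return R; }
  static Register index2VirtReg(unsigned Index) {
    assert(Index < VirtualRegFlag && "index too large");
    return Register(Index | VirtualRegFlag);
  }
  static unsigned virtReg2Index(Register Reg) {
    return unsigned(Reg) & ~VirtualRegFlag;
  }
  constexpr bool isVirtual() const { return (R & VirtualRegFlag) != 0; }
};

int main() {
  // Mirrors loops like: for (I = 0; I < MRI.getNumVirtRegs(); ++I)
  //                       Register Reg = Register::index2VirtReg(I);
  Register V = Register::index2VirtReg(5);
  assert(V.isVirtual());
  assert(Register::virtReg2Index(V) == 5);
  return 0;
}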
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 642aa6b4028ab..406edef8ff3fb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -286,7 +286,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
}
if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
// An unscaled add of a register. Set it as the new base.
- unsigned Reg = getRegForValue(Op);
+ Register Reg = getRegForValue(Op);
if (Reg == 0)
return false;
Addr.setReg(Reg);
@@ -372,7 +372,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
if (Addr.isSet()) {
return false;
}
- unsigned Reg = getRegForValue(Obj);
+ Register Reg = getRegForValue(Obj);
if (Reg == 0)
return false;
Addr.setReg(Reg);
@@ -430,7 +430,7 @@ unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V,
}
Not = false;
- unsigned Reg = getRegForValue(V);
+ Register Reg = getRegForValue(V);
if (Reg == 0)
return 0;
return maskI1Value(Reg, V);
@@ -458,12 +458,12 @@ unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
return 0;
}
- unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::CONST_I32), Imm)
.addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
- unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::AND_I32), Result)
.addReg(Reg)
@@ -488,18 +488,18 @@ unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
return 0;
}
- unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ Register Imm = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::CONST_I32), Imm)
.addImm(32 - MVT(From).getSizeInBits());
- unsigned Left = createResultReg(&WebAssembly::I32RegClass);
+ Register Left = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::SHL_I32), Left)
.addReg(Reg)
.addReg(Imm);
- unsigned Right = createResultReg(&WebAssembly::I32RegClass);
+ Register Right = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::SHR_S_I32), Right)
.addReg(Left)
@@ -517,7 +517,7 @@ unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
Reg = zeroExtendToI32(Reg, V, From);
- unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
.addReg(Reg);
@@ -539,7 +539,7 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
Reg = signExtendToI32(Reg, V, From);
- unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ Register Result = createResultReg(&WebAssembly::I64RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
.addReg(Reg);
@@ -555,7 +555,7 @@ unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
MVT::SimpleValueType From = getSimpleType(V->getType());
MVT::SimpleValueType To = getLegalType(From);
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
return zeroExtend(VReg, V, From, To);
@@ -564,7 +564,7 @@ unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
MVT::SimpleValueType From = getSimpleType(V->getType());
MVT::SimpleValueType To = getLegalType(From);
- unsigned VReg = getRegForValue(V);
+ Register VReg = getRegForValue(V);
if (VReg == 0)
return 0;
return signExtend(VReg, V, From, To);
@@ -578,7 +578,7 @@ unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
- unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
+ Register NotReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::EQZ_I32), NotReg)
.addReg(Reg);
@@ -586,7 +586,7 @@ unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
}
unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
- unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));
+ Register ResultReg = createResultReg(MRI.getRegClass(Reg));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(WebAssembly::COPY),
ResultReg)
.addReg(Reg);
@@ -598,7 +598,7 @@ unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
- unsigned ResultReg =
+ Register ResultReg =
createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
: &WebAssembly::I32RegClass);
unsigned Opc =
@@ -617,7 +617,7 @@ unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
return 0;
if (GV->isThreadLocal())
return 0;
- unsigned ResultReg =
+ Register ResultReg =
createResultReg(Subtarget->hasAddr64() ? &WebAssembly::I64RegClass
: &WebAssembly::I32RegClass);
unsigned Opc = Subtarget->hasAddr64() ? WebAssembly::CONST_I64
@@ -715,7 +715,7 @@ bool WebAssemblyFastISel::fastLowerArguments() {
default:
return false;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(I);
updateValueMap(&Arg, ResultReg);
@@ -887,7 +887,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
if (Subtarget->hasAddr64()) {
auto Wrap = BuildMI(*FuncInfo.MBB, std::prev(FuncInfo.InsertPt), DbgLoc,
TII.get(WebAssembly::I32_WRAP_I64));
- unsigned Reg32 = createResultReg(&WebAssembly::I32RegClass);
+ Register Reg32 = createResultReg(&WebAssembly::I32RegClass);
Wrap.addReg(Reg32, RegState::Define);
Wrap.addReg(CalleeReg);
CalleeReg = Reg32;
@@ -914,11 +914,11 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
if (CondReg == 0)
return false;
- unsigned TrueReg = getRegForValue(Select->getTrueValue());
+ Register TrueReg = getRegForValue(Select->getTrueValue());
if (TrueReg == 0)
return false;
- unsigned FalseReg = getRegForValue(Select->getFalseValue());
+ Register FalseReg = getRegForValue(Select->getFalseValue());
if (FalseReg == 0)
return false;
@@ -959,7 +959,7 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
return false;
}
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(TrueReg)
.addReg(FalseReg)
@@ -972,12 +972,12 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
const auto *Trunc = cast<TruncInst>(I);
- unsigned Reg = getRegForValue(Trunc->getOperand(0));
+ Register Reg = getRegForValue(Trunc->getOperand(0));
if (Reg == 0)
return false;
if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
- unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ Register Result = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(WebAssembly::I32_WRAP_I64), Result)
.addReg(Reg);
@@ -994,7 +994,7 @@ bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
const Value *Op = ZExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
- unsigned In = getRegForValue(Op);
+ Register In = getRegForValue(Op);
if (In == 0)
return false;
unsigned Reg = zeroExtend(In, Op, From, To);
@@ -1011,7 +1011,7 @@ bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
const Value *Op = SExt->getOperand(0);
MVT::SimpleValueType From = getSimpleType(Op->getType());
MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
- unsigned In = getRegForValue(Op);
+ Register In = getRegForValue(Op);
if (In == 0)
return false;
unsigned Reg = signExtend(In, Op, From, To);
@@ -1075,7 +1075,7 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
if (RHS == 0)
return false;
- unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(LHS)
.addReg(RHS);
@@ -1086,11 +1086,11 @@ bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
const auto *FCmp = cast<FCmpInst>(I);
- unsigned LHS = getRegForValue(FCmp->getOperand(0));
+ Register LHS = getRegForValue(FCmp->getOperand(0));
if (LHS == 0)
return false;
- unsigned RHS = getRegForValue(FCmp->getOperand(1));
+ Register RHS = getRegForValue(FCmp->getOperand(1));
if (RHS == 0)
return false;
@@ -1136,7 +1136,7 @@ bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
return false;
}
- unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ Register ResultReg = createResultReg(&WebAssembly::I32RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(LHS)
.addReg(RHS);
@@ -1157,7 +1157,7 @@ bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
if (!VT.isSimple() || !RetVT.isSimple())
return false;
- unsigned In = getRegForValue(I->getOperand(0));
+ Register In = getRegForValue(I->getOperand(0));
if (In == 0)
return false;
@@ -1229,7 +1229,7 @@ bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
materializeLoadStoreOperands(Addr);
- unsigned ResultReg = createResultReg(RC);
+ Register ResultReg = createResultReg(RC);
auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
ResultReg);
@@ -1284,7 +1284,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) {
materializeLoadStoreOperands(Addr);
- unsigned ValueReg = getRegForValue(Store->getValueOperand());
+ Register ValueReg = getRegForValue(Store->getValueOperand());
if (ValueReg == 0)
return false;
if (VTIsi1)
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 5a505ddfc1122..a221f37cfd945 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -1628,7 +1628,7 @@ SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
// local.copy between Op and its FI operand.
SDValue Chain = Op.getOperand(0);
SDLoc DL(Op);
- unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
+ Register Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
EVT VT = Src.getValueType();
SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
: WebAssembly::COPY_I64,
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
index 9d83a75a82478..6a6cac6d956fb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -82,7 +82,7 @@ bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(
// Split multiple-VN LiveIntervals into multiple LiveIntervals.
SmallVector<LiveInterval *, 4> SplitLIs;
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
if (MRI.reg_nodbg_empty(Reg))
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
index 8b8593ddcbdd4..5682cadc1a64f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -95,7 +95,7 @@ bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(
// TODO: This is fairly heavy-handed; find a better approach.
//
for (unsigned I = 0, E = MRI.getNumVirtRegs(); I < E; ++I) {
- unsigned Reg = Register::index2VirtReg(I);
+ Register Reg = Register::index2VirtReg(I);
// Skip unused registers.
if (MRI.use_nodbg_empty(Reg))
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
index fe127dec8aede..5252db4858b97 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -98,7 +98,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "Interesting register intervals:\n");
for (unsigned I = 0; I < NumVRegs; ++I) {
- unsigned VReg = Register::index2VirtReg(I);
+ Register VReg = Register::index2VirtReg(I);
if (MFI.isVRegStackified(VReg))
continue;
// Skip unused registers, which can use $drop.
@@ -135,7 +135,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
LiveInterval *LI = SortedIntervals[I];
- unsigned Old = LI->reg();
+ Register Old = LI->reg();
size_t Color = I;
const TargetRegisterClass *RC = MRI->getRegClass(Old);
@@ -152,7 +152,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
continue_outer:;
}
- unsigned New = SortedIntervals[Color]->reg();
+ Register New = SortedIntervals[Color]->reg();
SlotMapping[I] = New;
Changed |= Old != New;
UsedColors.set(Color);
@@ -168,7 +168,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
// Rewrite register operands.
for (size_t I = 0, E = SortedIntervals.size(); I < E; ++I) {
- unsigned Old = SortedIntervals[I]->reg();
+ Register Old = SortedIntervals[I]->reg();
unsigned New = SlotMapping[I];
if (Old != New)
MRI->replaceRegWith(Old, New);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
index c73b8a29daebd..76c78cd231305 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -89,7 +89,7 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
// Start the numbering for locals after the arg regs
unsigned CurReg = MFI.getParams().size();
for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
- unsigned VReg = Register::index2VirtReg(VRegIdx);
+ Register VReg = Register::index2VirtReg(VRegIdx);
// Skip unused registers.
if (MRI.use_empty(VReg))
continue;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
index 42419259802ed..d3ad47147ac85 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -909,8 +909,8 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
SubsequentUse != Use.getParent()->uses().end()) {
if (!SubsequentDef->isReg() || !SubsequentUse->isReg())
break;
- unsigned DefReg = SubsequentDef->getReg();
- unsigned UseReg = SubsequentUse->getReg();
+ Register DefReg = SubsequentDef->getReg();
+ Register UseReg = SubsequentUse->getReg();
// TODO: This single-use restriction could be relaxed by using tees
if (DefReg != UseReg || !MRI.hasOneUse(DefReg))
break;
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index 3981e0f24e6ee..51f2ced321bb7 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -479,7 +479,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(
// Calculate offsets.
for (const CalleeSavedInfo &I : CSI) {
int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
if (IsPrologue) {
@@ -2533,7 +2533,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// Assign slots for GPRs. It increases frame size.
for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
@@ -2550,7 +2550,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// Assign slots for XMMs.
for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
@@ -2596,7 +2596,7 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
const MachineFunction &MF = *MBB.getParent();
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
continue;
@@ -2630,7 +2630,7 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
// Make XMM regs spilled. X86 does not have ability of push/pop XMM.
// It can be done by spilling XMMs to stack frame.
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
@@ -2708,7 +2708,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
// Reload XMMs from stack frame.
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (X86::GR64RegClass.contains(Reg) ||
X86::GR32RegClass.contains(Reg))
continue;
@@ -2725,7 +2725,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
// POP GPRs.
unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
if (!X86::GR64RegClass.contains(Reg) &&
!X86::GR32RegClass.contains(Reg))
continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 83a4a025f518c..dba11e8b40001 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1139,7 +1139,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(
// branch back to itself. We can do this here because at this point, every
// predecessor of this block has an available value. This is basically just
// automating the construction of a PHI node for this target.
- unsigned TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
+ Register TargetReg = TargetAddrSSA.GetValueInMiddleOfBlock(&MBB);
// Insert a comparison of the incoming target register with this block's
// address. This also requires us to mark the block as having its address
@@ -1642,7 +1642,7 @@ void X86SpeculativeLoadHardeningPass::hardenLoadAddr(
return;
// Compute the current predicate state.
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
auto InsertPt = MI.getIterator();
@@ -1913,7 +1913,7 @@ unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
auto *RC = MRI->getRegClass(Reg);
int Bytes = TRI->getRegSizeInBits(*RC) / 8;
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
assert((Bytes == 1 || Bytes == 2 || Bytes == 4 || Bytes == 8) &&
"Unknown register size");
@@ -2078,7 +2078,7 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(
// First, we transfer the predicate state into the called function by merging
// it into the stack pointer. This will kill the current def of the state.
- unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
+ Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);
// If this call is also a return, it is a tail call and we don't need anything
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index f2f89f4269ed6..19ebcb3ea3e88 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -428,7 +428,7 @@ bool XCoreFrameLowering::spillCalleeSavedRegisters(
DL = MI->getDebugLoc();
for (const CalleeSavedInfo &I : CSI) {
- unsigned Reg = I.getReg();
+ Register Reg = I.getReg();
assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
"LR & FP are always handled in emitPrologue");
@@ -455,7 +455,7 @@ bool XCoreFrameLowering::restoreCalleeSavedRegisters(
if (!AtStart)
--BeforeI;
for (const CalleeSavedInfo &CSR : CSI) {
- unsigned Reg = CSR.getReg();
+ Register Reg = CSR.getReg();
assert(Reg != XCore::LR && !(Reg == XCore::R10 && hasFP(*MF)) &&
"LR & FP are always handled in emitEpilogue");
diff --git a/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index 6799823f6fcb7..0d1ba39b8b10b 100644
--- a/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -97,7 +97,7 @@ static void InsertFPConstInst(MachineBasicBlock::iterator II,
MachineInstr &MI = *II;
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc dl = MI.getDebugLoc();
- unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ Register ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
RS->setRegUsed(ScratchOffset);
TII.loadImmediate(MBB, II, ScratchOffset, Offset);
@@ -174,7 +174,7 @@ static void InsertSPConstInst(MachineBasicBlock::iterator II,
} else
ScratchBase = Reg;
BuildMI(MBB, II, dl, TII.get(XCore::LDAWSP_ru6), ScratchBase).addImm(0);
- unsigned ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
+ Register ScratchOffset = RS->scavengeRegister(&XCore::GRRegsRegClass, II, 0);
RS->setRegUsed(ScratchOffset);
TII.loadImmediate(MBB, II, ScratchOffset, Offset);