[llvm] 9acd954 - AMDGPU: Use Register
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 27 13:53:44 PST 2019
Author: Matt Arsenault
Date: 2019-12-27T16:53:21-05:00
New Revision: 9acd9544db9c3e5193389851915dbb69b5b685c2
URL: https://github.com/llvm/llvm-project/commit/9acd9544db9c3e5193389851915dbb69b5b685c2
DIFF: https://github.com/llvm/llvm-project/commit/9acd9544db9c3e5193389851915dbb69b5b685c2.diff
LOG: AMDGPU: Use Register
Added:
Modified:
llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
index 1179c4ede048..a9717c6ffb70 100644
--- a/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ b/llvm/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -57,7 +57,7 @@ char SIOptimizeExecMasking::ID = 0;
char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;
/// If \p MI is a copy from exec, return the register copied to.
-static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
+static Register isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
switch (MI.getOpcode()) {
case AMDGPU::COPY:
case AMDGPU::S_MOV_B64:
@@ -75,7 +75,7 @@ static unsigned isCopyFromExec(const MachineInstr &MI, const GCNSubtarget &ST) {
}
/// If \p MI is a copy to exec, return the register copied from.
-static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
+static Register isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
switch (MI.getOpcode()) {
case AMDGPU::COPY:
case AMDGPU::S_MOV_B64:
@@ -92,12 +92,12 @@ static unsigned isCopyToExec(const MachineInstr &MI, const GCNSubtarget &ST) {
llvm_unreachable("should have been replaced");
}
- return AMDGPU::NoRegister;
+ return Register();
}
/// If \p MI is a logical operation on an exec value,
/// return the register copied to.
-static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
+static Register isLogicalOpOnExec(const MachineInstr &MI) {
switch (MI.getOpcode()) {
case AMDGPU::S_AND_B64:
case AMDGPU::S_OR_B64:
@@ -245,8 +245,8 @@ static MachineBasicBlock::reverse_iterator findExecCopy(
auto E = MBB.rend();
for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
- unsigned CopyFromExec = isCopyFromExec(*I, ST);
- if (CopyFromExec != AMDGPU::NoRegister)
+ Register CopyFromExec = isCopyFromExec(*I, ST);
+ if (CopyFromExec.isValid())
return I;
}
@@ -272,7 +272,7 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = ST.getRegisterInfo();
const SIInstrInfo *TII = ST.getInstrInfo();
- unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
+ MCRegister Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
// Optimize sequences emitted for control flow lowering. They are originally
// emitted as the separate operations because spill code may need to be
@@ -291,8 +291,8 @@ bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
if (I == E)
continue;
- unsigned CopyToExec = isCopyToExec(*I, ST);
- if (CopyToExec == AMDGPU::NoRegister)
+ Register CopyToExec = isCopyToExec(*I, ST);
+ if (!CopyToExec.isValid())
continue;
// Scan backwards to find the def.
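A note for readers following the API change: this patch relies on the fact that a default-constructed llvm::Register holds the sentinel value 0, the same underlying value as AMDGPU::NoRegister, so the explicit NoRegister comparisons become isValid() checks. A minimal sketch of the pattern, with a hypothetical helper name (findCopyDest is not from the patch):

    #include "llvm/CodeGen/Register.h" // llvm::Register

    using namespace llvm;

    // Hypothetical helper mirroring the isCopyFromExec/isCopyToExec shape:
    // return the matched register, or a default-constructed Register() --
    // which is invalid (value 0) -- when nothing matches.
    static Register findCopyDest(bool Matched, Register Dest) {
      return Matched ? Dest : Register(); // replaces `return AMDGPU::NoRegister;`
    }

    static bool example(Register Dest) {
      Register R = findCopyDest(/*Matched=*/false, Dest);
      return R.isValid(); // replaces `R != AMDGPU::NoRegister`
    }

Because Register converts implicitly to unsigned, callers that still expect a raw register number keep compiling, while the "no register" case is now expressed through the type rather than a target-specific constant.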
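Similarly, the Exec variable becomes MCRegister rather than Register: EXEC and EXEC_LO are concrete physical registers, and MCRegister is the narrower type for those, while Register can also name virtual registers. A rough illustration of that distinction (the helper and parameter names are stand-ins for the sketch; the real code selects on ST.isWave32() between AMDGPU::EXEC_LO and AMDGPU::EXEC):

    #include "llvm/MC/MCRegister.h" // llvm::MCRegister

    // MCRegister models a fixed physical register, so it is the natural
    // type when the value can only ever be one of two known hardware regs.
    static llvm::MCRegister pickExec(bool IsWave32, llvm::MCRegister ExecLo,
                                     llvm::MCRegister Exec) {
      return IsWave32 ? ExecLo : Exec;
    }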