[llvm] 4a51b61 - [AArch64] Clean up assorted usage of hasOneUse/use_instructions [9/14]

Vedant Kumar via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 22 17:05:22 PDT 2020


Author: Vedant Kumar
Date: 2020-04-22T17:03:40-07:00
New Revision: 4a51b61cb32358b1bdf3b90dd5220e5a6c65c3af

URL: https://github.com/llvm/llvm-project/commit/4a51b61cb32358b1bdf3b90dd5220e5a6c65c3af
DIFF: https://github.com/llvm/llvm-project/commit/4a51b61cb32358b1bdf3b90dd5220e5a6c65c3af.diff

LOG: [AArch64] Clean up assorted usage of hasOneUse/use_instructions [9/14]

Summary:
Use the variants of these APIs that skip over debug instructions. This
is mostly a cleanup, but it also fixes a debug-variance issue that causes
addsub-shifted.ll and addsub_ext.ll to fail when debug info is inserted
by -mir-debugify.
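
For context, the non-debug variants differ from the plain ones only in
that they skip DBG_VALUE instructions when walking a register's use list.
A minimal sketch of the guard pattern this commit switches to, modeled on
isWorthFoldingIntoExtendedReg below (isWorthFolding is a hypothetical
helper for illustration, not code from this commit):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  using namespace llvm;

  static bool isWorthFolding(Register DefReg,
                             const MachineRegisterInfo &MRI) {
    // A DBG_VALUE referencing DefReg counts as a use for hasOneUse(), so
    // a fold guarded by it fires differently once -mir-debugify has
    // inserted debug info. hasOneNonDBGUse() ignores debug instructions
    // and gives the same answer with or without them.
    if (MRI.hasOneNonDBGUse(DefReg))
      return true;
    // Likewise, scan only the non-debug users: fold if every real use
    // is a memory operation.
    return all_of(MRI.use_nodbg_instructions(DefReg),
                  [](MachineInstr &Use) { return Use.mayLoadOrStore(); });
  }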

Reviewers: aemerson, paquette

Subscribers: kristof.beyls, hiraditya, jfb, danielkiss, llvm-commits, aprantl

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D78262

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
    llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
    llvm/test/CodeGen/AArch64/addsub-shifted.ll
    llvm/test/CodeGen/AArch64/addsub_ext.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
index f4b139b4a796..0bc805cc3e9d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -1024,7 +1024,7 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
     unsigned Opc = MI->getOpcode();
 
     if (!MI->getOperand(0).isReg() ||
-        !MRI.hasOneUse(MI->getOperand(0).getReg()))
+        !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
       break;
 
     // (tbz (any_ext x), b) -> (tbz x, b) if we don't use the extended bits.
@@ -1035,7 +1035,7 @@ static Register getTestBitReg(Register Reg, uint64_t &Bit, bool &Invert,
         Opc == TargetOpcode::G_TRUNC) {
       Register NextReg = MI->getOperand(1).getReg();
       // Did we find something worth folding?
-      if (!NextReg.isValid() || !MRI.hasOneUse(NextReg))
+      if (!NextReg.isValid() || !MRI.hasOneNonDBGUse(NextReg))
         break;
 
       // NextReg is worth folding. Keep looking.
@@ -3828,13 +3828,12 @@ bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
   while (CondDef) {
     // We can only fold if all of the defs have one use.
     Register CondDefReg = CondDef->getOperand(0).getReg();
-    if (!MRI.hasOneUse(CondDefReg)) {
+    if (!MRI.hasOneNonDBGUse(CondDefReg)) {
       // Unless it's another select.
-      for (auto UI = MRI.use_instr_begin(CondDefReg), UE = MRI.use_instr_end();
-           UI != UE; ++UI) {
-        if (CondDef == &*UI)
+      for (const MachineInstr &UI : MRI.use_nodbg_instructions(CondDefReg)) {
+        if (CondDef == &UI)
           continue;
-        if (UI->getOpcode() != TargetOpcode::G_SELECT)
+        if (UI.getOpcode() != TargetOpcode::G_SELECT)
           return false;
       }
     }
@@ -4659,7 +4658,7 @@ bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg(
     MachineInstr &MI, const MachineRegisterInfo &MRI) const {
   // Always fold if there is one use, or if we're optimizing for size.
   Register DefReg = MI.getOperand(0).getReg();
-  if (MRI.hasOneUse(DefReg) ||
+  if (MRI.hasOneNonDBGUse(DefReg) ||
       MI.getParent()->getParent()->getFunction().hasMinSize())
     return true;
 
@@ -4671,7 +4670,7 @@ bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg(
   // We have a fastpath, so folding a shift in and potentially computing it
   // many times may be beneficial. Check if this is only used in memory ops.
   // If it is, then we should fold.
-  return all_of(MRI.use_instructions(DefReg),
+  return all_of(MRI.use_nodbg_instructions(DefReg),
                 [](MachineInstr &Use) { return Use.mayLoadOrStore(); });
 }
 
@@ -4829,7 +4828,7 @@ AArch64InstructionSelector::selectAddrModeRegisterOffset(
   // If this is used more than once, let's not bother folding.
   // TODO: Check if they are memory ops. If they are, then we can still fold
   // without having to recompute anything.
-  if (!MRI.hasOneUse(Gep->getOperand(0).getReg()))
+  if (!MRI.hasOneNonDBGUse(Gep->getOperand(0).getReg()))
     return None;
 
   // Base is the GEP's LHS, offset is its RHS.

diff --git a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
index d925fa3214b1..e671ff216214 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterBankInfo.cpp
@@ -684,7 +684,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       // In that case, we want the default mapping to be on FPR
       // instead of blind map every scalar to GPR.
       for (const MachineInstr &UseMI :
-           MRI.use_instructions(MI.getOperand(0).getReg())) {
+           MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
         // If we have at least one direct use in a FP instruction,
         // assume this was a floating point load in the IR.
         // If it was not, we would have had a bitcast before
@@ -731,9 +731,8 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     //
     // %z = G_SELECT %cond %x %y
     // fpr = G_FOO %z ...
-    if (any_of(
-            MRI.use_instructions(MI.getOperand(0).getReg()),
-            [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
+    if (any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
+               [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); }))
       ++NumFP;
 
     // Check if the defs of the source values always produce floating point
@@ -774,7 +773,7 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // UNMERGE into scalars from a vector should always use FPR.
     // Likewise if any of the uses are FP instructions.
     if (SrcTy.isVector() || SrcTy == LLT::scalar(128) ||
-        any_of(MRI.use_instructions(MI.getOperand(0).getReg()),
+        any_of(MRI.use_nodbg_instructions(MI.getOperand(0).getReg()),
                [&](MachineInstr &MI) { return onlyUsesFP(MI, MRI, TRI); })) {
       // Set the register bank of every operand to FPR.
       for (unsigned Idx = 0, NumOperands = MI.getNumOperands();

diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
index c0034024914d..eb0d13a9096b 100644
--- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
-; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=GISEL,FALLBACK
+; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; RUN: llc -debugify-and-strip-all-safe -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=GISEL,FALLBACK
 
 ; FALLBACK-NOT: remark
 

diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll
index 573ce9b6f33a..7804ffa52d16 100644
--- a/llvm/test/CodeGen/AArch64/addsub_ext.ll
+++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll
@@ -1,5 +1,5 @@
-; RUN: llc -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s
-; RUN: llc -global-isel -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s --check-prefix=GISEL
+; RUN: llc -debugify-and-strip-all-safe -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc -debugify-and-strip-all-safe -global-isel -enable-machine-outliner=never -verify-machineinstrs %s -o - -mtriple=aarch64-linux-gnu -aarch64-enable-atomic-cfg-tidy=0 | FileCheck %s --check-prefix=GISEL
 
 ; FIXME: GISel only knows how to handle explicit G_SEXT instructions. So when
 ; G_SEXT is lowered to anything else, it won't fold in a stx*.

