[llvm-branch-commits] [llvm] 9e16c5b - [AArch64][GlobalISel] Look through a G_ZEXT when trying to match shift-extended register offsets.

Tom Stellard via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Dec 15 15:29:50 PST 2020


Author: Amara Emerson
Date: 2020-12-15T18:28:58-05:00
New Revision: 9e16c5bfae6e8d3cbec74376c2e734e3ff4ba11b

URL: https://github.com/llvm/llvm-project/commit/9e16c5bfae6e8d3cbec74376c2e734e3ff4ba11b
DIFF: https://github.com/llvm/llvm-project/commit/9e16c5bfae6e8d3cbec74376c2e734e3ff4ba11b.diff

LOG: [AArch64][GlobalISel] Look through a G_ZEXT when trying to match shift-extended register offsets.

The G_ZEXT in these cases seems to actually come from a combine that we do but
SelectionDAG doesn't. Looking through it allows us to match "uxtw #2" addressing
modes.

Differential Revision: https://reviews.llvm.org/D91475

(cherry picked from commit 0b6090699ab542cde24be1565b4d97dbad153cba)

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 408f0cb77e73..90bab9603245 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -4904,9 +4904,19 @@ AArch64InstructionSelector::selectExtendedSHL(
     return None;
 
   unsigned OffsetOpc = OffsetInst->getOpcode();
-  if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL)
-    return None;
+  bool LookedThroughZExt = false;
+  if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL) {
+    // Try to look through a ZEXT.
+    if (OffsetOpc != TargetOpcode::G_ZEXT || !WantsExt)
+      return None;
+
+    OffsetInst = MRI.getVRegDef(OffsetInst->getOperand(1).getReg());
+    OffsetOpc = OffsetInst->getOpcode();
+    LookedThroughZExt = true;
 
+    if (OffsetOpc != TargetOpcode::G_SHL && OffsetOpc != TargetOpcode::G_MUL)
+      return None;
+  }
   // Make sure that the memory op is a valid size.
   int64_t LegalShiftVal = Log2_32(SizeInBytes);
   if (LegalShiftVal == 0)
@@ -4957,20 +4967,23 @@ AArch64InstructionSelector::selectExtendedSHL(
 
   unsigned SignExtend = 0;
   if (WantsExt) {
-    // Check if the offset is defined by an extend.
-    MachineInstr *ExtInst = getDefIgnoringCopies(OffsetReg, MRI);
-    auto Ext = getExtendTypeForInst(*ExtInst, MRI, true);
-    if (Ext == AArch64_AM::InvalidShiftExtend)
-      return None;
+    // Check if the offset is defined by an extend, unless we looked through a
+    // G_ZEXT earlier.
+    if (!LookedThroughZExt) {
+      MachineInstr *ExtInst = getDefIgnoringCopies(OffsetReg, MRI);
+      auto Ext = getExtendTypeForInst(*ExtInst, MRI, true);
+      if (Ext == AArch64_AM::InvalidShiftExtend)
+        return None;
 
-    SignExtend = isSignExtendShiftType(Ext) ? 1 : 0;
-    // We only support SXTW for signed extension here.
-    if (SignExtend && Ext != AArch64_AM::SXTW)
-      return None;
+      SignExtend = isSignExtendShiftType(Ext) ? 1 : 0;
+      // We only support SXTW for signed extension here.
+      if (SignExtend && Ext != AArch64_AM::SXTW)
+        return None;
+      OffsetReg = ExtInst->getOperand(1).getReg();
+    }
 
     // Need a 32-bit wide register here.
     MachineIRBuilder MIB(*MRI.getVRegDef(Root.getReg()));
-    OffsetReg = ExtInst->getOperand(1).getReg();
     OffsetReg = narrowExtendRegIfNeeded(OffsetReg, MIB);
   }
 

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
index 6b4b51d37ca8..8efd7648eed9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
@@ -428,3 +428,39 @@ body:             |
     $x1 = COPY %load(s64)
     RET_ReallyLR implicit $x1
 ...
+---
+name:            zext_shl_LDRWroW
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$w0' }
+  - { reg: '$x1' }
+body:             |
+  bb.1:
+    liveins: $w0, $x1
+
+    ; We try to look through the G_ZEXT of the SHL here.
+
+    ; CHECK-LABEL: name: zext_shl_LDRWroW
+    ; CHECK: liveins: $w0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri [[COPY]], 7
+    ; CHECK: [[LDRWroW:%[0-9]+]]:gpr32 = LDRWroW [[COPY1]], [[ANDWri]], 0, 1 :: (load 4)
+    ; CHECK: $w0 = COPY [[LDRWroW]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(s32) = COPY $w0
+    %1:gpr(p0) = COPY $x1
+    %2:gpr(s32) = G_CONSTANT i32 255
+    %3:gpr(s32) = G_AND %0, %2
+    %13:gpr(s64) = G_CONSTANT i64 2
+    %12:gpr(s32) = G_SHL %3, %13(s64)
+    %6:gpr(s64) = G_ZEXT %12(s32)
+    %7:gpr(p0) = G_PTR_ADD %1, %6(s64)
+    %9:gpr(s32) = G_LOAD %7(p0) :: (load 4)
+    $w0 = COPY %9(s32)
+    RET_ReallyLR implicit $w0
+
+...


        


More information about the llvm-branch-commits mailing list