[PATCH] D65360: [AArch64][GlobalISel] Eliminate redundant G_ZEXT when the source is implicitly zext-loaded
Amara Emerson via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 2 14:16:18 PDT 2019
This revision was automatically updated to reflect the committed changes.
Closed by commit rL367723: [AArch64][GlobalISel] Eliminate redundant G_ZEXT when the source is implicitly… (authored by aemerson, committed by ).
Changed prior to commit:
https://reviews.llvm.org/D65360?vs=212032&id=213124#toc
Repository:
rL LLVM
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D65360/new/
https://reviews.llvm.org/D65360
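
For readers skimming the thread: the patch teaches the AArch64 GlobalISel instruction selector that a G_ZEXT is a no-op when its source value comes from a GPR load narrower than 32 bits, because such loads (LDRB/LDRH) already zero the upper bits of the 32-bit register they write. A hypothetical C++ reproducer for the kind of code that reaches this path (not part of the patch; the function names are illustrative):

// Hypothetical reproducer, not part of the patch: zero-extending an 8- or
// 16-bit load to 32 bits. Previously GlobalISel at -O0 selected a separate
// UBFM zero-extension after the narrow load (see the UBFMWri checks removed
// from select-zextload.mir below); with this change the LDRB/LDRH alone
// suffices.
#include <cstdint>

uint32_t load_u8(const uint8_t *p) { return *p; }   // should need only an ldrb, no extra zero-extend
uint32_t load_u16(const uint16_t *p) { return *p; } // should need only an ldrh, no extra zero-extend
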
Files:
llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-redundant-zext-of-load.mir
llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
@@ -68,8 +68,8 @@
     ; CHECK-LABEL: name: zextload_s32_from_s16_not_combined
     ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[LDRHHui]], 0, 15
-    ; CHECK: $w0 = COPY [[UBFMWri]]
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRHHui]]
+    ; CHECK: $w0 = COPY [[COPY1]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
     %2:gpr(s32) = G_ZEXT %1
Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-redundant-zext-of-load.mir
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-redundant-zext-of-load.mir
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-redundant-zext-of-load.mir
@@ -0,0 +1,48 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -O0 -run-pass=instruction-select -verify-machineinstrs %s -global-isel-abort=1 -o - | FileCheck %s
+---
+name: redundant_zext_8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: redundant_zext_8
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1)
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRBBui]]
+    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:gpr(p0) = COPY $x0
+    %2:gpr(s8) = G_LOAD %1(p0) :: (load 1)
+    %3:gpr(s32) = G_ZEXT %2(s8)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: redundant_zext_16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: redundant_zext_16
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2)
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRHHui]]
+    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:gpr(p0) = COPY $x0
+    %2:gpr(s16) = G_LOAD %1(p0) :: (load 2)
+    %3:gpr(s32) = G_ZEXT %2(s16)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
Index: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -2045,6 +2045,23 @@
     if (DstTy.isVector())
       return false; // Should be handled by imported patterns.
 
+    // First, check if we are zero-extending the result of a load whose dest
+    // type is smaller than 32 bits; if so, this zext is redundant. GPR32 is
+    // the smallest GPR register class on AArch64, and any narrower load
+    // already zero-extends into the upper bits of its 32-bit register. E.g.
+    //   %v(s8) = G_LOAD %p :: (load 1)
+    //   %v2(s32) = G_ZEXT %v(s8)
+    if (!IsSigned) {
+      auto *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
+      if (LoadMI &&
+          RBI.getRegBank(SrcReg, MRI, TRI)->getID() == AArch64::GPRRegBankID) {
+        const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();
+        unsigned BytesLoaded = MemOp->getSize();
+        if (BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded)
+          return selectCopy(I, TII, MRI, TRI, RBI);
+      }
+    }
+
     if (DstSize == 64) {
       // FIXME: Can we avoid manually doing this?
       if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
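
For reference, the check added above can be read as a small standalone predicate. The following is only a sketch for the thread, not part of the commit: the helper name isZextOfNarrowGPRLoadRedundant and the free-function framing are invented here, the expected register-bank ID is passed in as a parameter to avoid target-internal headers (the caller would pass AArch64::GPRRegBankID), and the include paths reflect the trunk layout at the time.

// Sketch only: a condensed restatement of the check added to
// AArch64InstructionSelector::select() above. Names and framing are
// illustrative, not part of the patch.
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h" // getOpcodeDef()
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

using namespace llvm;

// Returns true when a G_ZEXT of SrcReg is redundant: SrcReg must be produced
// directly by a G_LOAD on the GPR bank that reads fewer than 4 bytes, and the
// loaded register type must match the memory size (i.e. the load is not
// itself an extending load of an even narrower memory type).
static bool isZextOfNarrowGPRLoadRedundant(unsigned SrcReg, LLT SrcTy,
                                           unsigned GPRBankID, // e.g. AArch64::GPRRegBankID
                                           const MachineRegisterInfo &MRI,
                                           const TargetRegisterInfo &TRI,
                                           const RegisterBankInfo &RBI) {
  // The value being extended must come straight from a load.
  MachineInstr *LoadMI = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
  if (!LoadMI)
    return false;
  // Only GPR loads zero the upper bits implicitly; FPR loads do not apply.
  if (RBI.getRegBank(SrcReg, MRI, TRI)->getID() != GPRBankID)
    return false;
  // Narrower-than-32-bit load whose register type matches the memory size.
  const MachineMemOperand *MemOp = *LoadMI->memoperands_begin();
  unsigned BytesLoaded = MemOp->getSize();
  return BytesLoaded < 4 && SrcTy.getSizeInBytes() == BytesLoaded;
}

In the committed patch this logic is simply inlined at the top of the G_ZEXT/G_SEXT handling in select(), guarded by !IsSigned, and falls through to selectCopy() when it fires.
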