[llvm] [AArch64][GISel] Always fold G_SHL into addressing mode where possible, unless the subtarget has addr-lsl-slow-14 (PR #96603)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 17 03:54:11 PDT 2024
https://github.com/Him188 updated https://github.com/llvm/llvm-project/pull/96603
From ee8a1afdb45a4515e1026cbb75ef1f5c7eaf69b3 Mon Sep 17 00:00:00 2001
From: Tianyi Guan <tguan at nvidia.com>
Date: Mon, 1 Jul 2024 10:52:25 +0100
Subject: [PATCH] [AArch64][GISel] Always fold G_SHL into addressing mode where
possible, unless the subtarget has addr-lsl-slow-14
---
.../GISel/AArch64InstructionSelector.cpp | 76 +++++++--
.../GlobalISel/load-addressing-modes.mir | 156 ++++++++++++------
.../GlobalISel/store-addressing-modes.mir | 62 ++++---
.../CodeGen/AArch64/aarch64-fold-lslfast.ll | 49 +++---
4 files changed, 231 insertions(+), 112 deletions(-)
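For context, here is a minimal sketch (illustrative only; the names are made up and it is not taken from the patch or its tests) of the pattern this change affects, in the same MIR notation the tests use: a G_SHL computing a scaled index that feeds the offset of a G_PTR_ADD, whose result is then loaded from:

  %base:gpr(p0) = COPY $x0
  %idx:gpr(s64) = COPY $x1
  %c3:gpr(s64) = G_CONSTANT i64 3
  %off:gpr(s64) = G_SHL %idx, %c3(s64)
  %addr:gpr(p0) = G_PTR_ADD %base, %off(s64)
  %val:gpr(s64) = G_LOAD %addr(p0) :: (load (s64))

With this change the shift is folded into the load's register-offset addressing mode (e.g. LDRXroX %base, %idx, 0, 1, i.e. an ldr with lsl #3) even when the shifted value has more than one use, unless the shift amount is 1 or 4 and the subtarget has the addr-lsl-slow-14 feature, in which case the separate address computation is kept.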
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 24d65624e09e9..0d3f6d9e353ba 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -414,8 +414,13 @@ class AArch64InstructionSelector : public InstructionSelector {
return selectAddrModeIndexed(Root, Width / 8);
}
+ std::optional<bool>
+ isWorthFoldingIntoAddrMode(MachineInstr &MI,
+ const MachineRegisterInfo &MRI) const;
+
bool isWorthFoldingIntoExtendedReg(MachineInstr &MI,
- const MachineRegisterInfo &MRI) const;
+ const MachineRegisterInfo &MRI,
+ bool IsAddrOperand) const;
ComplexRendererFns
selectAddrModeShiftedExtendXReg(MachineOperand &Root,
unsigned SizeInBytes) const;
@@ -6869,19 +6874,70 @@ AArch64InstructionSelector::selectNegArithImmed(MachineOperand &Root) const {
return select12BitValueWithLeftShift(Immed);
}
+/// Checks whether folding \p MI into a load/store addressing mode is
+/// known to be beneficial or harmful.
+///
+/// Returns:
+/// - true if folding MI would be beneficial.
+/// - false if folding MI would be bad.
+/// - std::nullopt if it is unclear whether folding MI is beneficial.
+///
+/// \p MI can be the offset operand of a G_PTR_ADD, e.g. the G_SHL in the example:
+///
+/// %13:gpr(s64) = G_CONSTANT i64 1
+/// %8:gpr(s64) = G_SHL %6, %13(s64)
+/// %9:gpr(p0) = G_PTR_ADD %0, %8(s64)
+/// %12:gpr(s32) = G_LOAD %9(p0) :: (load (s16))
+std::optional<bool> AArch64InstructionSelector::isWorthFoldingIntoAddrMode(
+ MachineInstr &MI, const MachineRegisterInfo &MRI) const {
+ if (MI.getOpcode() == AArch64::G_SHL) {
+    // Shifts in address operands are free, except on subtargets with
+    // AddrLSLSlow14.
+ if (const auto ValAndVeg = getIConstantVRegValWithLookThrough(
+ MI.getOperand(2).getReg(), MRI)) {
+ const APInt ShiftVal = ValAndVeg->Value;
+
+ // Don't fold if we know this will be slow.
+ return !(STI.hasAddrLSLSlow14() && (ShiftVal == 1 || ShiftVal == 4));
+ }
+ }
+ return std::nullopt;
+}
+
/// Return true if it is worth folding MI into an extended register. That is,
/// if it's safe to pull it into the addressing mode of a load or store as a
/// shift.
+/// \p IsAddrOperand is true if the def of \p MI is used as an address
+/// operand (e.g. feeding into an LDR/STR).
bool AArch64InstructionSelector::isWorthFoldingIntoExtendedReg(
- MachineInstr &MI, const MachineRegisterInfo &MRI) const {
+ MachineInstr &MI, const MachineRegisterInfo &MRI,
+ bool IsAddrOperand) const {
+
// Always fold if there is one use, or if we're optimizing for size.
Register DefReg = MI.getOperand(0).getReg();
if (MRI.hasOneNonDBGUse(DefReg) ||
MI.getParent()->getParent()->getFunction().hasOptSize())
return true;
- // FIXME: Consider checking HasAddrLSLSlow14 and HasALULSLFast as
- // appropriate.
+ if (IsAddrOperand) {
+ // If we are already sure that folding MI is good or bad, return the result.
+ if (const auto Worth = isWorthFoldingIntoAddrMode(MI, MRI))
+ return *Worth;
+
+    // Fold G_PTR_ADD if its offset operand can be folded.
+ if (MI.getOpcode() == AArch64::G_PTR_ADD) {
+ MachineInstr *OffsetInst =
+ getDefIgnoringCopies(MI.getOperand(2).getReg(), MRI);
+
+      // Note that we already know the G_PTR_ADD has at least two uses.
+      // If we are also sure whether folding its offset is beneficial,
+      // return that result.
+ if (const auto Worth = isWorthFoldingIntoAddrMode(*OffsetInst, MRI))
+ return *Worth;
+ }
+ }
+
+ // FIXME: Consider checking HasALULSLFast as appropriate.
// We have a fastpath, so folding a shift in and potentially computing it
// many times may be beneficial. Check if this is only used in memory ops.
@@ -6929,7 +6985,7 @@ AArch64InstructionSelector::selectExtendedSHL(
int64_t LegalShiftVal = Log2_32(SizeInBytes);
if (LegalShiftVal == 0)
return std::nullopt;
- if (!isWorthFoldingIntoExtendedReg(*OffsetInst, MRI))
+ if (!isWorthFoldingIntoExtendedReg(*OffsetInst, MRI, true))
return std::nullopt;
// Now, try to find the specific G_CONSTANT. Start by assuming that the
@@ -7036,7 +7092,7 @@ AArch64InstructionSelector::selectAddrModeShiftedExtendXReg(
// Check if we can find the G_PTR_ADD.
MachineInstr *PtrAdd =
getOpcodeDef(TargetOpcode::G_PTR_ADD, Root.getReg(), MRI);
- if (!PtrAdd || !isWorthFoldingIntoExtendedReg(*PtrAdd, MRI))
+ if (!PtrAdd || !isWorthFoldingIntoExtendedReg(*PtrAdd, MRI, true))
return std::nullopt;
// Now, try to match an opcode which will match our specific offset.
@@ -7170,7 +7226,7 @@ AArch64InstructionSelector::selectAddrModeWRO(MachineOperand &Root,
MachineInstr *PtrAdd =
getOpcodeDef(TargetOpcode::G_PTR_ADD, Root.getReg(), MRI);
- if (!PtrAdd || !isWorthFoldingIntoExtendedReg(*PtrAdd, MRI))
+ if (!PtrAdd || !isWorthFoldingIntoExtendedReg(*PtrAdd, MRI, true))
return std::nullopt;
MachineOperand &LHS = PtrAdd->getOperand(1);
@@ -7201,7 +7257,7 @@ AArch64InstructionSelector::selectAddrModeWRO(MachineOperand &Root,
//
// e.g.
// ldr something, [base_reg, ext_reg, sxtw]
- if (!isWorthFoldingIntoExtendedReg(*OffsetInst, MRI))
+ if (!isWorthFoldingIntoExtendedReg(*OffsetInst, MRI, true))
return std::nullopt;
// Check if this is an extend. We'll get an extend type if it is.
@@ -7396,7 +7452,7 @@ AArch64InstructionSelector::selectShiftedRegister(MachineOperand &Root,
return std::nullopt;
if (ShType == AArch64_AM::ROR && !AllowROR)
return std::nullopt;
- if (!isWorthFoldingIntoExtendedReg(*ShiftInst, MRI))
+ if (!isWorthFoldingIntoExtendedReg(*ShiftInst, MRI, false))
return std::nullopt;
// Need an immediate on the RHS.
@@ -7510,7 +7566,7 @@ AArch64InstructionSelector::selectArithExtendedRegister(
if (!RootDef)
return std::nullopt;
- if (!isWorthFoldingIntoExtendedReg(*RootDef, MRI))
+ if (!isWorthFoldingIntoExtendedReg(*RootDef, MRI, false))
return std::nullopt;
// Check if we can fold a shift and an extend.
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 3af2aaf57eed8..dc2e1c5dc28d4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -535,13 +535,13 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
+ ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
- ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+ ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+ ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
@@ -571,19 +571,36 @@ body: |
liveins: $x0, $x1, $x2
liveins: $w1, $x0
- ; CHECK-LABEL: name: ldrhrox_more_than_one_mem_use_shl
- ; CHECK: liveins: $x0, $x1, $x2, $w1, $x0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
- ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
- ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
- ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
- ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
- ; CHECK-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
- ; CHECK-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
- ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
- ; CHECK-NEXT: RET_ReallyLR implicit [[ADDWrr]]
+ ; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_mem_use_shl
+ ; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
+ ; CHECK-FAST-NEXT: {{ $}}
+ ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
+ ; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
+ ; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
+ ; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
+ ; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
+ ; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
+ ; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
+ ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
+ ;
+ ; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_mem_use_shl
+ ; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
+ ; CHECK-SLOW-NEXT: {{ $}}
+ ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
+ ; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
+ ; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
+ ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
+ ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
+ ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
+ ; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
+ ; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
+ ; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
+ ; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
+ ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
@@ -612,19 +629,36 @@ body: |
liveins: $x0, $x1, $x2
liveins: $w1, $x0
- ; CHECK-LABEL: name: ldrhrox_more_than_one_use_shl
- ; CHECK: liveins: $x0, $x1, $x2, $w1, $x0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
- ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
- ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
- ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
- ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
- ; CHECK-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
- ; CHECK-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
- ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
- ; CHECK-NEXT: RET_ReallyLR implicit [[ADDWrr]]
+ ; CHECK-FAST-LABEL: name: ldrhrox_more_than_one_use_shl
+ ; CHECK-FAST: liveins: $x0, $x1, $x2, $w1, $x0
+ ; CHECK-FAST-NEXT: {{ $}}
+ ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+ ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-FAST-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
+ ; CHECK-FAST-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
+ ; CHECK-FAST-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
+ ; CHECK-FAST-NEXT: [[ANDXri:%[0-9]+]]:gpr64common = ANDXri [[SUBREG_TO_REG]], 4103
+ ; CHECK-FAST-NEXT: [[LDRHHroX:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
+ ; CHECK-FAST-NEXT: [[LDRHHroX1:%[0-9]+]]:gpr32 = LDRHHroX [[COPY]], [[ANDXri]], 0, 1 :: (load (s16))
+ ; CHECK-FAST-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHroX]], [[LDRHHroX1]]
+ ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDWrr]]
+ ;
+ ; CHECK-SLOW-LABEL: name: ldrhrox_more_than_one_use_shl
+ ; CHECK-SLOW: liveins: $x0, $x1, $x2, $w1, $x0
+ ; CHECK-SLOW-NEXT: {{ $}}
+ ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+ ; CHECK-SLOW-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 9, 31
+ ; CHECK-SLOW-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[UBFMWri]], 0
+ ; CHECK-SLOW-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
+ ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]].sub_32
+ ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
+ ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64common = COPY [[COPY]]
+ ; CHECK-SLOW-NEXT: [[ADDXrx:%[0-9]+]]:gpr64sp = ADDXrx [[COPY4]], [[COPY3]], 1
+ ; CHECK-SLOW-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
+ ; CHECK-SLOW-NEXT: [[LDRHHui1:%[0-9]+]]:gpr32 = LDRHHui [[ADDXrx]], 0 :: (load (s16))
+ ; CHECK-SLOW-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRHHui]], [[LDRHHui1]]
+ ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDWrr]]
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%15:gpr(s64) = G_CONSTANT i64 9
@@ -656,15 +690,15 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 62, 61
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[ADDXrr]], 0 :: (load (s32) from %ir.addr)
- ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[LDRWui]], 0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
+ ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY1]], [[COPY]], 0, 1 :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[LDRWroX]], 0
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 2, 0
; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[SUBREG_TO_REG]], [[ADDXri]]
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+ ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
; CHECK-NEXT: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
@@ -692,21 +726,37 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2
- ; CHECK-LABEL: name: ldrqrox_more_than_one_use_shl
- ; CHECK: liveins: $x0, $x1, $x2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
- ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
- ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADDXrr]], 0 :: (load (s128) from %ir.addr)
- ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY [[COPY2]]
- ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY3]], [[ADDXri]]
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
- ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXrr1]]
- ; CHECK-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
+ ; CHECK-FAST-LABEL: name: ldrqrox_more_than_one_use_shl
+ ; CHECK-FAST: liveins: $x0, $x1, $x2
+ ; CHECK-FAST-NEXT: {{ $}}
+ ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-FAST-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
+ ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+ ; CHECK-FAST-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+ ; CHECK-FAST-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[UBFMXri]]
+ ; CHECK-FAST-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY1]], [[COPY]], 0, 1 :: (load (s128) from %ir.addr)
+ ; CHECK-FAST-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
+ ; CHECK-FAST-NEXT: [[COPY3:%[0-9]+]]:fpr64 = COPY [[LDRQroX]].dsub
+ ; CHECK-FAST-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[COPY3]]
+ ; CHECK-FAST-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXri]]
+ ; CHECK-FAST-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrr]], [[ADDXrr1]]
+ ; CHECK-FAST-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
+ ;
+ ; CHECK-SLOW-LABEL: name: ldrqrox_more_than_one_use_shl
+ ; CHECK-SLOW: liveins: $x0, $x1, $x2
+ ; CHECK-SLOW-NEXT: {{ $}}
+ ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-SLOW-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 60, 59
+ ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-SLOW-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
+ ; CHECK-SLOW-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADDXrr]], 0 :: (load (s128) from %ir.addr)
+ ; CHECK-SLOW-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 4, 0
+ ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
+ ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr64 = COPY [[COPY2]]
+ ; CHECK-SLOW-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY3]], [[ADDXri]]
+ ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+ ; CHECK-SLOW-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY4]], [[ADDXrr1]]
+ ; CHECK-SLOW-NEXT: RET_ReallyLR implicit [[ADDXrr2]]
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 4
%2:gpr(s64) = G_SHL %0, %1(s64)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
index 62ebe86504bfa..94af12a91ae97 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-addressing-modes.mir
@@ -241,16 +241,28 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2
- ; CHECK-LABEL: name: shl_slow_1_more_than_one_use
- ; CHECK: liveins: $x0, $x1, $x2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
- ; CHECK-NEXT: STRHHroX [[COPY3]], [[COPY1]], [[COPY]], 0, 1 :: (store (s16) into %ir.addr)
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
- ; CHECK-NEXT: STRHHroX [[COPY4]], [[COPY1]], [[COPY]], 0, 1 :: (store (s16) into %ir.addr)
+ ; CHECK-FAST-LABEL: name: shl_slow_1_more_than_one_use
+ ; CHECK-FAST: liveins: $x0, $x1, $x2
+ ; CHECK-FAST-NEXT: {{ $}}
+ ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-FAST-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-FAST-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
+ ; CHECK-FAST-NEXT: STRHHroX [[COPY3]], [[COPY1]], [[COPY]], 0, 1 :: (store (s16) into %ir.addr)
+ ; CHECK-FAST-NEXT: [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
+ ; CHECK-FAST-NEXT: STRHHroX [[COPY4]], [[COPY1]], [[COPY]], 0, 1 :: (store (s16) into %ir.addr)
+ ;
+ ; CHECK-SLOW-LABEL: name: shl_slow_1_more_than_one_use
+ ; CHECK-SLOW: liveins: $x0, $x1, $x2
+ ; CHECK-SLOW-NEXT: {{ $}}
+ ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-SLOW-NEXT: %ptr:gpr64common = ADDXrs [[COPY1]], [[COPY]], 1
+ ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $x2
+ ; CHECK-SLOW-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
+ ; CHECK-SLOW-NEXT: STRHHui [[COPY3]], %ptr, 0 :: (store (s16) into %ir.addr)
+ ; CHECK-SLOW-NEXT: [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY2]].sub_32
+ ; CHECK-SLOW-NEXT: STRHHui [[COPY4]], %ptr, 0 :: (store (s16) into %ir.addr)
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 1
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -296,14 +308,24 @@ machineFunctionInfo: {}
body: |
bb.0:
liveins: $x0, $x1, $x2, $q0
- ; CHECK-LABEL: name: shl_slow_4_more_than_one_use
- ; CHECK: liveins: $x0, $x1, $x2, $q0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
- ; CHECK-NEXT: STRQroX [[COPY2]], [[COPY1]], [[COPY]], 0, 1 :: (store (s128) into %ir.addr)
- ; CHECK-NEXT: STRQroX [[COPY2]], [[COPY1]], [[COPY]], 0, 1 :: (store (s128) into %ir.addr)
+ ; CHECK-FAST-LABEL: name: shl_slow_4_more_than_one_use
+ ; CHECK-FAST: liveins: $x0, $x1, $x2, $q0
+ ; CHECK-FAST-NEXT: {{ $}}
+ ; CHECK-FAST-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-FAST-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+ ; CHECK-FAST-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-FAST-NEXT: STRQroX [[COPY2]], [[COPY1]], [[COPY]], 0, 1 :: (store (s128) into %ir.addr)
+ ; CHECK-FAST-NEXT: STRQroX [[COPY2]], [[COPY1]], [[COPY]], 0, 1 :: (store (s128) into %ir.addr)
+ ;
+ ; CHECK-SLOW-LABEL: name: shl_slow_4_more_than_one_use
+ ; CHECK-SLOW: liveins: $x0, $x1, $x2, $q0
+ ; CHECK-SLOW-NEXT: {{ $}}
+ ; CHECK-SLOW-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK-SLOW-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-SLOW-NEXT: %ptr:gpr64common = ADDXrs [[COPY1]], [[COPY]], 4
+ ; CHECK-SLOW-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+ ; CHECK-SLOW-NEXT: STRQui [[COPY2]], %ptr, 0 :: (store (s128) into %ir.addr)
+ ; CHECK-SLOW-NEXT: STRQui [[COPY2]], %ptr, 0 :: (store (s128) into %ir.addr)
%0:gpr(s64) = COPY $x0
%1:gpr(s64) = G_CONSTANT i64 4
%2:gpr(s64) = G_SHL %0, %1(s64)
@@ -339,7 +361,3 @@ body: |
%4:gpr(p0) = COPY $x2
G_STORE %4, %ptr :: (store (p0) into %ir.addr)
...
-
-# NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-# CHECK-FAST: {{.*}}
-# CHECK-SLOW: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 614ac15d959f0..63dcafed2320a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -28,17 +28,16 @@ define i16 @halfword(ptr %ctx, i32 %xor72) nounwind {
;
; CHECK0-GISEL-LABEL: halfword:
; CHECK0-GISEL: // %bb.0:
-; CHECK0-GISEL-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK0-GISEL-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
; CHECK0-GISEL-NEXT: lsr w8, w1, #9
; CHECK0-GISEL-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK0-GISEL-NEXT: mov x19, x0
-; CHECK0-GISEL-NEXT: and x21, x8, #0xff
-; CHECK0-GISEL-NEXT: ldrh w20, [x0, x21, lsl #1]
+; CHECK0-GISEL-NEXT: add x20, x0, w8, uxtb #1
+; CHECK0-GISEL-NEXT: ldrh w19, [x20]
; CHECK0-GISEL-NEXT: bl foo
-; CHECK0-GISEL-NEXT: mov w0, w20
-; CHECK0-GISEL-NEXT: strh w20, [x19, x21, lsl #1]
+; CHECK0-GISEL-NEXT: mov w0, w19
+; CHECK0-GISEL-NEXT: strh w19, [x20]
; CHECK0-GISEL-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK0-GISEL-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK0-GISEL-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
; CHECK0-GISEL-NEXT: ret
;
; CHECK3-SDAG-LABEL: halfword:
@@ -248,27 +247,23 @@ define i16 @multi_use_half_word(ptr %ctx, i32 %xor72) {
;
; CHECK0-GISEL-LABEL: multi_use_half_word:
; CHECK0-GISEL: // %bb.0: // %entry
-; CHECK0-GISEL-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
-; CHECK0-GISEL-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; CHECK0-GISEL-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
-; CHECK0-GISEL-NEXT: .cfi_def_cfa_offset 48
+; CHECK0-GISEL-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK0-GISEL-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK0-GISEL-NEXT: .cfi_def_cfa_offset 32
; CHECK0-GISEL-NEXT: .cfi_offset w19, -8
; CHECK0-GISEL-NEXT: .cfi_offset w20, -16
; CHECK0-GISEL-NEXT: .cfi_offset w21, -24
-; CHECK0-GISEL-NEXT: .cfi_offset w22, -32
-; CHECK0-GISEL-NEXT: .cfi_offset w30, -48
+; CHECK0-GISEL-NEXT: .cfi_offset w30, -32
; CHECK0-GISEL-NEXT: lsr w8, w1, #9
-; CHECK0-GISEL-NEXT: mov x19, x0
-; CHECK0-GISEL-NEXT: and x21, x8, #0xff
-; CHECK0-GISEL-NEXT: ldrh w20, [x0, x21, lsl #1]
-; CHECK0-GISEL-NEXT: add w22, w20, #1
+; CHECK0-GISEL-NEXT: add x20, x0, w8, uxtb #1
+; CHECK0-GISEL-NEXT: ldrh w19, [x20]
+; CHECK0-GISEL-NEXT: add w21, w19, #1
; CHECK0-GISEL-NEXT: bl foo
-; CHECK0-GISEL-NEXT: strh w20, [x19, x21, lsl #1]
-; CHECK0-GISEL-NEXT: mov w0, w20
-; CHECK0-GISEL-NEXT: strh w22, [x19, x21, lsl #1]
-; CHECK0-GISEL-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK0-GISEL-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; CHECK0-GISEL-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
+; CHECK0-GISEL-NEXT: strh w19, [x20]
+; CHECK0-GISEL-NEXT: mov w0, w19
+; CHECK0-GISEL-NEXT: strh w21, [x20]
+; CHECK0-GISEL-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK0-GISEL-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK0-GISEL-NEXT: ret
;
; CHECK3-SDAG-LABEL: multi_use_half_word:
@@ -387,14 +382,14 @@ define i128 @gep4(ptr %p, i128 %a, i64 %b) {
;
; CHECK0-GISEL-LABEL: gep4:
; CHECK0-GISEL: // %bb.0:
-; CHECK0-GISEL-NEXT: ldr q1, [x0, x4, lsl #4]
+; CHECK0-GISEL-NEXT: add x8, x0, x4, lsl #4
; CHECK0-GISEL-NEXT: mov v0.d[0], x2
-; CHECK0-GISEL-NEXT: mov x8, x0
+; CHECK0-GISEL-NEXT: ldr q1, [x8]
; CHECK0-GISEL-NEXT: mov d2, v1.d[1]
-; CHECK0-GISEL-NEXT: fmov x0, d1
; CHECK0-GISEL-NEXT: mov v0.d[1], x3
+; CHECK0-GISEL-NEXT: fmov x0, d1
; CHECK0-GISEL-NEXT: fmov x1, d2
-; CHECK0-GISEL-NEXT: str q0, [x8, x4, lsl #4]
+; CHECK0-GISEL-NEXT: str q0, [x8]
; CHECK0-GISEL-NEXT: ret
;
; CHECK3-SDAG-LABEL: gep4:
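As a footnote, the core of the new heuristic is small. Below is a standalone C++ sketch of the tri-state decision; the function and parameter names are hypothetical, not the patch's actual code (which lives in isWorthFoldingIntoAddrMode above):

  #include <cstdint>
  #include <optional>

  // A shift used as an address offset is free to fold, except LSL #1 and
  // LSL #4 on subtargets reporting the addr-lsl-slow-14 feature. Returns
  // std::nullopt when the shift amount is not a known constant, deferring
  // to the existing use-count heuristics.
  std::optional<bool>
  isWorthFoldingShlIntoAddrMode(std::optional<uint64_t> ConstShiftAmt,
                                bool HasAddrLSLSlow14) {
    if (!ConstShiftAmt)
      return std::nullopt;
    return !(HasAddrLSLSlow14 &&
             (*ConstShiftAmt == 1 || *ConstShiftAmt == 4));
  }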