[llvm] [AArch64] merge index address with large offset into base address (PR #72187)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 20 23:15:37 PST 2023
https://github.com/vfdff updated https://github.com/llvm/llvm-project/pull/72187
>From 4e685fca5258e58671e898f5239ebc4265186d43 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Mon, 20 Nov 2023 01:13:43 -0500
Subject: [PATCH] [AArch64] merge index address with large offset into base
address
A case for this transformation, https://gcc.godbolt.org/z/nhYcWq1WE
```
Fold
mov w8, #56952
movk w8, #15, lsl #16
ldrb w0, [x0, x8]
into
add x0, x0, 1036288
ldrb w0, [x0, 3704]
```
Only `LDRBBroX` is supported in this initial patch.
Fix https://github.com/llvm/llvm-project/issues/71917
---
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 57 +++++++++++++++++++
llvm/test/CodeGen/AArch64/arm64-addrmode.ll | 25 ++++++++
2 files changed, 82 insertions(+)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 7617dccdeee397f..e8b51bfbb98cbbb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1069,6 +1069,38 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSigned
return true;
}
+// 16-bit optionally shifted immediates are legal for single mov.
+static bool isLegalSingleMOVImmediate(int64_t Immed) {
+ if (Immed == std::numeric_limits<int64_t>::min()) {
+ LLVM_DEBUG(dbgs() << "Illegal single mov imm " << Immed
+ << ": avoid UB for INT64_MIN\n");
+ return false;
+ }
+ // The shift value can be 0(default), 16, 32, 48
+ bool IsLegal =
+ ((Immed >> 16) == 0 || ((Immed & 0xffff) == 0 && Immed >> 32 == 0) ||
+ ((Immed & 0xffffffff) == 0 && Immed >> 48 == 0) ||
+ ((Immed & 0xffffffffffff) == 0));
+ LLVM_DEBUG(dbgs() << "Is " << Immed << " legal single mov imm: "
+ << (IsLegal ? "yes" : "no") << "\n");
+ return IsLegal;
+}
+
+// Check whether an unsigned value is not in the immediate range of mov but is
+// in the immediate range of imm24.
+static bool isPreferredBaseAddrMode(int64_t ImmOff) {
+ // If the immediate can already be encoded in a mov, then just keep the
+ // existing logic.
+ if (isLegalSingleMOVImmediate(ImmOff))
+ return false;
+
+ // For an imm24, its low imm12 can be folded as the immediate of a load or
+ // store, and its high part can be encoded in an add.
+ if (ImmOff >> 24 == 0)
+ return true;
+ return false;
+}
+
/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address. The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
@@ -1110,6 +1142,24 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64);
return true;
}
+
+ // Prefer [Reg + imm] mode.
+ // ADD BaseReg, WideImmediate & 0x0fff000
+ // LDR X2, [BaseReg, WideImmediate & 0x0fff]
+ SDValue LHS = N.getOperand(0);
+ if (isPreferredBaseAddrMode(RHSC)) {
+ int64_t ImmOffUnScale = RHSC;
+ int64_t ImmOffLow = ImmOffUnScale & 0x0fff;
+ int64_t ImmOffHigh = RHSC - ImmOffLow;
+ SDValue ImmHighSDV =
+ CurDAG->getTargetConstant(ImmOffHigh >> 12, dl, MVT::i64);
+ Base = SDValue(CurDAG->getMachineNode(
+ AArch64::ADDXri, dl, MVT::i64, LHS, ImmHighSDV,
+ CurDAG->getTargetConstant(12, dl, MVT::i32)),
+ 0);
+ OffImm = CurDAG->getTargetConstant(ImmOffLow >> Scale, dl, MVT::i64);
+ return true;
+ }
}
}
@@ -1351,6 +1401,13 @@ bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
return true;
}
+ // Prefer [Reg + imm] mode, so skip these scenarios.
+ if (auto *OffsetC = dyn_cast<ConstantSDNode>(RHS)) {
+ int64_t ImmOff = (int64_t)OffsetC->getZExtValue();
+ if (isPreferredBaseAddrMode(ImmOff)) {
+ return false;
+ }
+ }
// Match any non-shifted, non-extend, non-immediate add expression.
Base = LHS;
Offset = RHS;
diff --git a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
index 69c558d9d5599dc..e78a9123c7d86bc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll
@@ -209,3 +209,28 @@ define void @t17(i64 %a) {
%3 = load volatile i64, ptr %2, align 8
ret void
}
+
+; https://gcc.godbolt.org/z/ErhhdxMv3
+define i32 @LdOffset_i8(ptr nocapture noundef readonly %a) {
+; CHECK-LABEL: LdOffset_i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x8, x0, #253, lsl #12 // =1036288
+; CHECK-NEXT: ldrb w0, [x8, #3704]
+; CHECK-NEXT: ret
+ %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
+ %val = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %val to i32
+ ret i32 %conv
+}
+
+define i32 @LdOffset_i16(ptr nocapture noundef readonly %a) {
+; CHECK-LABEL: LdOffset_i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x8, x0, #507, lsl #12 // =2076672
+; CHECK-NEXT: ldrsh w0, [x8, #3312]
+; CHECK-NEXT: ret
+ %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
+ %val = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %val to i32
+ ret i32 %conv
+}
More information about the llvm-commits
mailing list