[llvm] [LLVM][AArch64] Correctly lower funnel shifts by zero. (PR #140058)
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Thu May 15 09:03:02 PDT 2025
https://github.com/paulwalker-arm updated https://github.com/llvm/llvm-project/pull/140058
>From 8dd00657e6a1fa4896c1d533f8c3a08ed4078be6 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 15 May 2025 12:04:28 +0000
Subject: [PATCH 1/3] [LLVM][DAGCombiner] Add command line option to disable
the combiner.
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d6e288a59b2ee..2b752498f64a1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -149,6 +149,10 @@ static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore(
cl::desc("DAG combiner enable load/<replace bytes>/store with "
"a narrower store"));
+static cl::opt<bool> DisableCombines("combiner-disabled", cl::Hidden,
+ cl::init(false),
+ cl::desc("Disable the DAG combiner"));
+
namespace {
class DAGCombiner {
@@ -248,7 +252,8 @@ namespace {
STI(D.getSubtarget().getSelectionDAGInfo()), OptLevel(OL),
BatchAA(BatchAA) {
ForCodeSize = DAG.shouldOptForSize();
- DisableGenericCombines = STI && STI->disableGenericCombines(OptLevel);
+ DisableGenericCombines =
+ DisableCombines || (STI && STI->disableGenericCombines(OptLevel));
MaximumLegalStoreInBits = 0;
// We use the minimum store size here, since that's all we can guarantee
>From 2bfdff851d3c3e30491249dfb377b0983ead4311 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 15 May 2025 12:35:19 +0000
Subject: [PATCH 2/3] [LLVM][AArch64] Correctly lower funnel shifts by
constants.
Prevent LowerFunnelShift from creating an invalid ISD::FSHR when
lowering "ISD::FSHL X, Y, 0". Such inputs are rare because the shift is a
no-op that DAGCombiner will normally optimise away. However, we should not
rely on that, so this PR mirrors the same optimisation within the lowering
code.
Ensure LowerFunnelShift normalises constant shift amounts, because the isel
rules expect them to be in the range [0, src bit length).
NOTE: To simplify testing, this PR also adds a command line option to
disable the DAG combiner (-combiner-disabled).
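For readers following along, the constant-amount handling the series
converges on (including the later half-width fix) can be sketched outside
of LLVM as a small standalone function. This is only an illustrative
sketch, not the patch itself; the Lowering struct, the enumerator names and
lowerConstantFunnelShift are made up for the example:

// Standalone sketch (not LLVM code) of the constant shift-amount handling
// described above: normalise the amount to [0, BitWidth), fold the identity
// cases, and otherwise express the result as an FSHR by an in-range amount.
#include <cassert>

struct Lowering {
  enum Action { ReturnOp0, ReturnOp1, FshrByAmount };
  Action Act;
  unsigned Amount; // Only meaningful for FshrByAmount.
};

static Lowering lowerConstantFunnelShift(bool IsFSHL, unsigned BitWidth,
                                         unsigned ShiftAmt) {
  unsigned NewShiftNo = ShiftAmt % BitWidth; // Normalise to [0, BitWidth).
  if (IsFSHL) {
    if (NewShiftNo == 0)
      return {Lowering::ReturnOp0, 0}; // fshl(x, y, 0) == x
    NewShiftNo = BitWidth - NewShiftNo; // Convert FSHL to FSHR.
  } else if (NewShiftNo == 0) {
    return {Lowering::ReturnOp1, 0}; // fshr(x, y, 0) == y
  }
  return {Lowering::FshrByAmount, NewShiftNo};
}

int main() {
  // fshl i32 by 33 becomes fshr by 31, matching the "extr ..., #31" tests.
  Lowering L = lowerConstantFunnelShift(/*IsFSHL=*/true, 32, 33);
  assert(L.Act == Lowering::FshrByAmount && L.Amount == 31);
  // fshr i64 by 64 is the identity on the second operand ("mov x0, x2").
  L = lowerConstantFunnelShift(/*IsFSHL=*/false, 64, 64);
  assert(L.Act == Lowering::ReturnOp1);
  return 0;
}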
---
.../Target/AArch64/AArch64ISelLowering.cpp | 21 +++-
.../CodeGen/AArch64/fsh-combiner-disabled.ll | 116 ++++++++++++++++++
2 files changed, 131 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index fb7f7d6f7537d..9d0c8910b7c03 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7259,21 +7259,30 @@ static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
// FSHL is converted to FSHR before deciding what to do with it
static SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) {
SDValue Shifts = Op.getOperand(2);
- // Check if the shift amount is a constant
+ // Check if the shift amount is a constant and normalise to [0, SrcBitLen)
// If opcode is FSHL, convert it to FSHR
if (auto *ShiftNo = dyn_cast<ConstantSDNode>(Shifts)) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
+ unsigned int NewShiftNo = ShiftNo->getZExtValue() % VT.getFixedSizeInBits();
if (Op.getOpcode() == ISD::FSHL) {
- unsigned int NewShiftNo =
- VT.getFixedSizeInBits() - ShiftNo->getZExtValue();
+ if (NewShiftNo == 0)
+ return Op.getOperand(0);
+
+ NewShiftNo = VT.getFixedSizeInBits() - NewShiftNo;
+ } else if (Op.getOpcode() == ISD::FSHR) {
+ if (NewShiftNo == 0)
+ return Op.getOperand(1);
+
+ if (ShiftNo->getZExtValue() == NewShiftNo)
+ return Op;
+ }
+
+ if (ShiftNo->getZExtValue() != NewShiftNo)
return DAG.getNode(
ISD::FSHR, DL, VT, Op.getOperand(0), Op.getOperand(1),
DAG.getConstant(NewShiftNo, DL, Shifts.getValueType()));
- } else if (Op.getOpcode() == ISD::FSHR) {
- return Op;
- }
}
return SDValue();
diff --git a/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll b/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
new file mode 100644
index 0000000000000..746a1bed4ec24
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc %s -o - | FileCheck %s
+; RUN: llc -combiner-disabled %s -o - | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Verify lowering code in isolation to ensure we can lower shifts that would
+; normally be optimised away.
+
+define i32 @fshl_i32_by_zero(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshl_i32_by_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 0)
+ ret i32 %r
+}
+
+define i32 @fshl_i32_by_srclen(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshl_i32_by_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w0, w1
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 32)
+ ret i32 %r
+}
+
+define i32 @fshl_i32_by_srclen_plus1(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshl_i32_by_srclen_plus1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr w0, w1, w2, #31
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 33)
+ ret i32 %r
+}
+
+define i64 @fshl_i64_by_zero(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshl_i64_by_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 0)
+ ret i64 %r
+}
+
+define i64 @fshl_i64_by_srclen(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshl_i64_by_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 64)
+ ret i64 %r
+}
+
+define i64 @fshl_i64_by_srclen_plus1(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshl_i64_by_srclen_plus1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr x0, x1, x2, #63
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 65)
+ ret i64 %r
+}
+
+define i32 @fshr_i32_by_zero(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshr_i32_by_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w0, w2
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 0)
+ ret i32 %r
+}
+
+define i32 @fshr_i32_by_srclen(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshr_i32_by_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w0, w2
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 32)
+ ret i32 %r
+}
+
+define i32 @fshr_i32_by_srclen_plus1(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshr_i32_by_srclen_plus1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr w0, w1, w2, #1
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 33)
+ ret i32 %r
+}
+
+define i64 @fshr_i64_by_zero(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshr_i64_by_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 0)
+ ret i64 %r
+}
+
+define i64 @fshr_i64_by_srclen(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshr_i64_by_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x0, x2
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 64)
+ ret i64 %r
+}
+
+define i64 @fshr_i64_by_srclen_plus1(i64 %unused, i64 %a, i64 %b) {
+; CHECK-LABEL: fshr_i64_by_srclen_plus1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr x0, x1, x2, #1
+; CHECK-NEXT: ret
+ %r = call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 65)
+ ret i64 %r
+}
>From fbad1e59174314befbe144e20c00cd60c9fe3529 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 15 May 2025 15:58:57 +0000
Subject: [PATCH 3/3] Ensure shifts by half the element width are lowered
correctly.
With the previous structure, an FSHL by exactly half the element width
converted to an FSHR by the same constant, so the trailing "amount changed"
guard skipped the rewrite and no node was returned.
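As a quick sanity check of the arithmetic (plain C++, not LLVM code; the
variable names are illustrative):

int main() {
  // An fshl i32 by 16 normalises to an FSHR by 32 - 16 == 16, i.e. the
  // same constant, which is what tripped the old "amount unchanged"
  // early-out and left the node unlowered.
  unsigned BitWidth = 32, Amt = 16;
  unsigned NewShiftNo = Amt % BitWidth;     // 16
  unsigned FshrAmt = BitWidth - NewShiftNo; // 16 == Amt
  return FshrAmt == Amt ? 0 : 1;            // exits 0: same amount
}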
---
.../lib/Target/AArch64/AArch64ISelLowering.cpp | 7 +++++--
.../CodeGen/AArch64/fsh-combiner-disabled.ll | 18 ++++++++++++++++++
2 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9d0c8910b7c03..e86832bc48902 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7271,18 +7271,21 @@ static SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
NewShiftNo = VT.getFixedSizeInBits() - NewShiftNo;
+ return DAG.getNode(
+ ISD::FSHR, DL, VT, Op.getOperand(0), Op.getOperand(1),
+ DAG.getConstant(NewShiftNo, DL, Shifts.getValueType()));
} else if (Op.getOpcode() == ISD::FSHR) {
if (NewShiftNo == 0)
return Op.getOperand(1);
if (ShiftNo->getZExtValue() == NewShiftNo)
return Op;
- }
- if (ShiftNo->getZExtValue() != NewShiftNo)
+ // Rewrite using the normalised shift amount.
return DAG.getNode(
ISD::FSHR, DL, VT, Op.getOperand(0), Op.getOperand(1),
DAG.getConstant(NewShiftNo, DL, Shifts.getValueType()));
+ }
}
return SDValue();
diff --git a/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll b/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
index 746a1bed4ec24..091fe483e7d8d 100644
--- a/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
+++ b/llvm/test/CodeGen/AArch64/fsh-combiner-disabled.ll
@@ -16,6 +16,15 @@ define i32 @fshl_i32_by_zero(i32 %unused, i32 %a, i32 %b) {
ret i32 %r
}
+define i32 @fshl_i32_by_half_srclen(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshl_i32_by_half_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr w0, w1, w2, #16
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 16)
+ ret i32 %r
+}
+
define i32 @fshl_i32_by_srclen(i32 %unused, i32 %a, i32 %b) {
; CHECK-LABEL: fshl_i32_by_srclen:
; CHECK: // %bb.0:
@@ -79,6 +88,15 @@ define i32 @fshr_i32_by_srclen(i32 %unused, i32 %a, i32 %b) {
ret i32 %r
}
+define i32 @fshr_i32_by_half_srclen(i32 %unused, i32 %a, i32 %b) {
+; CHECK-LABEL: fshr_i32_by_half_srclen:
+; CHECK: // %bb.0:
+; CHECK-NEXT: extr w0, w1, w2, #16
+; CHECK-NEXT: ret
+ %r = call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 16)
+ ret i32 %r
+}
+
define i32 @fshr_i32_by_srclen_plus1(i32 %unused, i32 %a, i32 %b) {
; CHECK-LABEL: fshr_i32_by_srclen_plus1:
; CHECK: // %bb.0: