[llvm] [DAGCombine] Remove OneUse restriction when folding (shl (add x, c1), c2) and (shl (sext (add x, c1)), c2) (PR #101294)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 8 22:10:54 PDT 2024
https://github.com/LiqinWeng updated https://github.com/llvm/llvm-project/pull/101294
From ff521bbccd43bdeaa5721264b01ad935a160e18e Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sat, 3 Aug 2024 19:13:35 +0800
Subject: [PATCH 1/2] [Test] Pre-submit tests for #101294
---
.../CodeGen/RISCV/add_sext_shl_constant.ll | 195 ++++++++++++++++++
llvm/test/CodeGen/RISCV/add_shl_constant.ll | 132 ++++++++++++
2 files changed, 327 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
create mode 100644 llvm/test/CodeGen/RISCV/add_shl_constant.ll
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
new file mode 100644
index 00000000000000..35f3656e868681
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64 %s
+
+define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a3, a1, 5
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 24(a0)
+; RV64-NEXT: sw a3, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %add3 = add nsw i32 %a, 6
+ %idxprom4 = sext i32 %add3 to i64
+ %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+ store i32 %b, ptr %arrayidx5
+ %add6 = add nsw i32 %a, 35
+ %idxprom7 = sext i32 %add6 to i64
+ %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+ store i32 %add, ptr %arrayidx8
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a3, a1, 2047
+; RV64-NEXT: addiw a3, a3, 1
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: lui a4, 2
+; RV64-NEXT: add a0, a0, a4
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a3, 4(a0)
+; RV64-NEXT: sw a2, 120(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %0 = sext i32 %a to i64
+ %1 = getelementptr i32, ptr %array1, i64 %0
+ %arrayidx3 = getelementptr i8, ptr %1, i64 8196
+ store i32 %add, ptr %arrayidx3
+ %arrayidx6 = getelementptr i8, ptr %1, i64 8312
+ store i32 %b, ptr %arrayidx6
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_sext:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a3, a1, 5
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 24(a0)
+; RV64-NEXT: sd a3, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %add3 = add nsw i32 %a, 6
+ %idxprom4 = sext i32 %add3 to i64
+ %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+ store i32 %b, ptr %arrayidx5
+ %add6 = add nsw i32 %a, 35
+ %idxprom7 = sext i32 %add6 to i64
+ %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+ store i64 %idxprom, ptr %arrayidx8
+ ret void
+}
+
+; Test with a branch: the add has more than one use (it also feeds a select) but can still be simplified.
+define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a4, a1, 5
+; RV64-NEXT: slli a5, a4, 2
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: bgtz a3, .LBB3_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a6, a2
+; RV64-NEXT: .LBB3_2: # %entry
+; RV64-NEXT: sw a6, 0(a5)
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a6, 24(a0)
+; RV64-NEXT: sw a4, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %cmp = icmp sgt i32 %x, 0
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ %add.b = select i1 %cmp, i32 %add, i32 %b
+ store i32 %add.b, ptr %arrayidx
+ %add5 = add nsw i32 %a, 6
+ %idxprom6 = sext i32 %add5 to i64
+ %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
+ store i32 %add.b, ptr %arrayidx7
+ %add8 = add nsw i32 %a, 35
+ %idxprom9 = sext i32 %add8 to i64
+ %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
+ store i32 %add, ptr %arrayidx10
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a4, a1, 2047
+; RV64-NEXT: addiw a4, a4, 1
+; RV64-NEXT: slli a6, a4, 2
+; RV64-NEXT: add a6, a0, a6
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: bgtz a3, .LBB4_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a5, a2
+; RV64-NEXT: .LBB4_2: # %entry
+; RV64-NEXT: sw a5, 0(a6)
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: lui a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a5, 4(a0)
+; RV64-NEXT: sw a4, 120(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %cmp = icmp sgt i32 %x, 0
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ %add.b = select i1 %cmp, i32 %add, i32 %b
+ store i32 %add.b, ptr %arrayidx
+ %0 = sext i32 %a to i64
+ %1 = getelementptr i32, ptr %array1, i64 %0
+ %arrayidx7 = getelementptr i8, ptr %1, i64 8196
+ store i32 %add.b, ptr %arrayidx7
+ %arrayidx10 = getelementptr i8, ptr %1, i64 8312
+ store i32 %add, ptr %arrayidx10
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
+; RV64-LABEL: add_shl_moreOneUse_inSelect:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a4, a1, 5
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: bgtz a3, .LBB5_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a5, a2
+; RV64-NEXT: .LBB5_2: # %entry
+; RV64-NEXT: slli a2, a4, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: sd a5, 0(a2)
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sd a5, 48(a0)
+; RV64-NEXT: sd a4, 280(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i64 %a, 5
+ %cmp = icmp sgt i64 %x, 0
+ %spec.select = select i1 %cmp, i64 %add, i64 %b
+ %0 = getelementptr inbounds i64, ptr %array1, i64 %add
+ store i64 %spec.select, ptr %0
+ %add3 = add nsw i64 %a, 6
+ %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
+ store i64 %spec.select, ptr %arrayidx4
+ %add5 = add nsw i64 %a, 35
+ %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
+ store i64 %add, ptr %arrayidx6
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
new file mode 100644
index 00000000000000..5c71a3c5449940
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32 %s
+
+define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
+; RV32-LABEL: add_shl_oneUse:
+; RV32: # %bb.0:
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: addi a0, a0, 984
+; RV32-NEXT: ret
+ %add.0 = add i32 %x, 123
+ %shl = shl i32 %add.0, 3
+ %add.1 = add i32 %shl, %y
+ ret i32 %add.1
+}
+
+define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b) {
+; RV32-LABEL: add_shl_moreOneUse_inStore:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a3, a1, 5
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a2, 24(a0)
+; RV32-NEXT: sw a3, 140(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %b, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx3 = getelementptr i8, ptr %0, i32 24
+ store i32 %b, ptr %arrayidx3
+ %arrayidx5 = getelementptr i8, ptr %0, i32 140
+ store i32 %add, ptr %arrayidx5
+ ret void
+}
+
+define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV32-LABEL: add_shl_moreOneUse_inStore_addexceedsign12:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a3, a1, 2047
+; RV32-NEXT: addi a3, a3, 1
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a3, 4(a0)
+; RV32-NEXT: sw a2, 120(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %b, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx2 = getelementptr i8, ptr %0, i32 8196
+ store i32 %add, ptr %arrayidx2
+ %arrayidx4 = getelementptr i8, ptr %0, i32 8312
+ store i32 %b, ptr %arrayidx4
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a4, a1, 5
+; RV32-NEXT: mv a5, a4
+; RV32-NEXT: bgtz a3, .LBB3_2
+; RV32-NEXT: # %bb.1: # %entry
+; RV32-NEXT: mv a5, a2
+; RV32-NEXT: .LBB3_2: # %entry
+; RV32-NEXT: slli a2, a4, 2
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 24(a0)
+; RV32-NEXT: sw a4, 140(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %cmp = icmp sgt i32 %x, 0
+ %cond = select i1 %cmp, i32 %add, i32 %b
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %cond, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx2 = getelementptr i32, ptr %0, i32 6
+ store i32 %cond, ptr %arrayidx2
+ %arrayidx4 = getelementptr i32, ptr %0, i32 35
+ store i32 %add, ptr %arrayidx4
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect_addexceedsign12:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a4, a1, 2047
+; RV32-NEXT: addi a4, a4, 1
+; RV32-NEXT: mv a5, a4
+; RV32-NEXT: bgtz a3, .LBB4_2
+; RV32-NEXT: # %bb.1: # %entry
+; RV32-NEXT: mv a5, a2
+; RV32-NEXT: .LBB4_2: # %entry
+; RV32-NEXT: slli a2, a4, 2
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 4(a0)
+; RV32-NEXT: sw a4, 120(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %cmp = icmp sgt i32 %x, 0
+ %spec.select = select i1 %cmp, i32 %add, i32 %b
+ %0 = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %spec.select, ptr %0, align 4
+ %1 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx4 = getelementptr i8, ptr %1, i32 8196
+ store i32 %spec.select, ptr %arrayidx4
+ %arrayidx6 = getelementptr i8, ptr %1, i32 8312
+ store i32 %add, ptr %arrayidx6
+ ret void
+}
From 30b6b955ca5bef8c987febcd54d660597fafb94f Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Fri, 9 Aug 2024 13:08:36 +0800
Subject: [PATCH 2/2] [DAGCombine] Remove OneUse restriction when folding (shl
(add x, c1), c2) and (shl (sext (add x, c1)), c2)
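
The fold in question rewrites (shl (add x, c1), c2) into (add (shl x, c2), c1 << c2).
As a sketch of why dropping the one-use restriction helps, consider the IR from the
added test add_shl_moreOneUse_inStore (reproduced here only as an illustration):

  %add = add nsw i32 %a, 5        ; %add has more than one use
  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
  store i32 %b, ptr %arrayidx

The getelementptr lowers to (shl (add %a, 5), 2) in the DAG. With the restriction
removed, the shift can still be folded through the add, giving (add (shl %a, 2), 20),
and the store then folds the constant 20 into its immediate offset (sw a2, 20(a0))
instead of materializing the scaled address with an extra slli/add pair.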
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 5 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 5 ++
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 7 +++
llvm/lib/Target/ARM/ARMISelLowering.cpp | 6 +++
.../Target/Hexagon/HexagonISelLowering.cpp | 15 ++++++
llvm/lib/Target/Hexagon/HexagonISelLowering.h | 3 ++
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 15 ++++++
llvm/lib/Target/PowerPC/PPCISelLowering.h | 3 ++
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 48 ++++++++++++++++++
llvm/lib/Target/X86/X86ISelLowering.cpp | 15 ++++++
llvm/lib/Target/X86/X86ISelLowering.h | 3 ++
.../CodeGen/RISCV/add_sext_shl_constant.ll | 50 ++++++++-----------
llvm/test/CodeGen/RISCV/add_shl_constant.ll | 24 +++------
13 files changed, 150 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b35d08b327ef3d..5ccc087579acba 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10070,7 +10070,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// Variant of version done on multiply, except mul by a power of 2 is turned
// into a shift.
if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
- N0->hasOneUse() && TLI.isDesirableToCommuteWithShift(N, Level)) {
+ TLI.isDesirableToCommuteWithShift(N, Level)) {
SDValue N01 = N0.getOperand(1);
if (SDValue Shl1 =
DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
@@ -10089,8 +10089,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// TODO: Should we limit this with isLegalAddImmediate?
if (N0.getOpcode() == ISD::SIGN_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::ADD &&
- N0.getOperand(0)->getFlags().hasNoSignedWrap() && N0->hasOneUse() &&
- N0.getOperand(0)->hasOneUse() &&
+ N0.getOperand(0)->getFlags().hasNoSignedWrap() &&
TLI.isDesirableToCommuteWithShift(N, Level)) {
SDValue Add = N0.getOperand(0);
SDLoc DL(N0);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1e9da9b819bddc..e3cf66d7bce475 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17518,6 +17518,11 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
SDValue ShiftLHS = N->getOperand(0);
EVT VT = N->getValueType(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
// If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
// combine it with shift 'N' to let it be lowered to UBFX except:
// ((x >> C) & mask) << C.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 2ad91de5663237..b60d1933783e73 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1036,6 +1036,13 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
// Always commute pre-type legalization and right shifts.
// We're looking for shl(or(x,y),z) patterns.
if (Level < CombineLevel::AfterLegalizeTypes ||
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 75d16a42d0205a..ae9499b0b5c86e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13803,6 +13803,12 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
if (Level == BeforeLegalizeTypes)
return true;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 7aeaebc584c64c..3e000e5b61e469 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2156,6 +2156,21 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
return X.getValueType().isScalarInteger(); // 'tstbit'
}
+bool HexagonTargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
+ return true;
+}
+
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 3fd961f5a74623..a6bd57630031c4 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -155,6 +155,9 @@ class HexagonTargetLowering : public TargetLowering {
bool hasBitTest(SDValue X, SDValue Y) const override;
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
+
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// Return true if an FMA operation is faster than a pair of mul and add
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1686ec572c8553..a1a9978b990877 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -18877,3 +18877,18 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
+
+bool PPCTargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
+ return true;
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 0bdfdcd15441f4..90ea4531596ba3 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1489,6 +1489,9 @@ namespace llvm {
/// through to determine the optimal load/store instruction format.
unsigned computeMOFlags(const SDNode *Parent, SDValue N,
SelectionDAG &DAG) const;
+
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
}; // end class PPCTargetLowering
namespace PPC {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 72aec12158f2c1..05425b435a8660 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17504,8 +17504,46 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
// (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
SDValue N0 = N->getOperand(0);
EVT Ty = N0.getValueType();
+
+ // Loads and stores can fold a constant offset into their addressing mode, so
+ // when the other uses of the AddNode are loads/stores, the folding above can
+ // still be performed profitably.
+ auto isLDST = [&]() {
+ bool canOptAlways = false;
+ if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+ for (SDNode *Use : N0->uses()) {
+ // This use is the one we're on right now. Skip it
+ if (Use == N || Use->getOpcode() == ISD::SELECT)
+ continue;
+ if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+ canOptAlways = false;
+ break;
+ }
+ canOptAlways = true;
+ }
+ }
+
+ if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+ !N0->getOperand(0)->hasOneUse()) {
+ for (SDNode *Use : N0->getOperand(0)->uses()) {
+ // This use is the one we're on right now. Skip it
+ if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
+ continue;
+ if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+ canOptAlways = false;
+ break;
+ }
+ canOptAlways = true;
+ }
+ }
+ return canOptAlways;
+ };
+
if (Ty.isScalarInteger() &&
(N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
+ if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+ return isLDST();
+ }
auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (C1 && C2) {
@@ -17540,6 +17578,16 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
return false;
}
}
+
+ if ((N0->getOpcode() == ISD::ADD || N0->getOpcode() == ISD::OR) &&
+ !N0->hasOneUse())
+ return false;
+
+ if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+ N0->getOperand(0)->getOpcode() == ISD::ADD &&
+ !(N0->hasOneUse() && N0->getOperand(0)->hasOneUse()))
+ return isLDST();
+
return true;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b971afda4229ac..43d6af96bfef05 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -59541,3 +59541,18 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
return TargetLowering::getPrefLoopAlignment();
}
+
+bool X86TargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
+ return true;
+}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 362daa98e1f8e0..a96ebc79353a4c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1837,6 +1837,9 @@ namespace llvm {
SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) const;
+
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
};
namespace X86 {
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 35f3656e868681..b4a64cc1e61bd0 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -5,13 +5,11 @@
define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
; RV64-LABEL: add_sext_shl_moreOneUse_add:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a3, a1, 5
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: addi a3, a1, 5
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 20(a0)
; RV64-NEXT: sw a2, 24(a0)
; RV64-NEXT: sw a3, 140(a0)
; RV64-NEXT: ret
@@ -35,15 +33,13 @@ define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b
; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a3, a1, 2047
-; RV64-NEXT: addiw a3, a3, 1
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: addi a3, a3, 1
+; RV64-NEXT: lui a4, 2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: lui a4, 2
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a2, 0(a0)
; RV64-NEXT: sw a3, 4(a0)
; RV64-NEXT: sw a2, 120(a0)
; RV64-NEXT: ret
@@ -94,18 +90,16 @@ entry:
define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a4, a1, 5
-; RV64-NEXT: slli a5, a4, 2
-; RV64-NEXT: add a5, a0, a5
-; RV64-NEXT: mv a6, a4
+; RV64-NEXT: addi a4, a1, 5
+; RV64-NEXT: mv a5, a4
; RV64-NEXT: bgtz a3, .LBB3_2
; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a6, a2
+; RV64-NEXT: mv a5, a2
; RV64-NEXT: .LBB3_2: # %entry
-; RV64-NEXT: sw a6, 0(a5)
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sw a6, 24(a0)
+; RV64-NEXT: sw a5, 20(a0)
+; RV64-NEXT: sw a5, 24(a0)
; RV64-NEXT: sw a4, 140(a0)
; RV64-NEXT: ret
entry:
@@ -130,20 +124,18 @@ define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i
; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a4, a1, 2047
-; RV64-NEXT: addiw a4, a4, 1
-; RV64-NEXT: slli a6, a4, 2
-; RV64-NEXT: add a6, a0, a6
-; RV64-NEXT: mv a5, a4
+; RV64-NEXT: addi a4, a4, 1
+; RV64-NEXT: lui a5, 2
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, a0, a5
+; RV64-NEXT: mv a1, a4
; RV64-NEXT: bgtz a3, .LBB4_2
; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a5, a2
+; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB4_2: # %entry
-; RV64-NEXT: sw a5, 0(a6)
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: lui a1, 2
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: sw a5, 4(a0)
+; RV64-NEXT: sw a1, 0(a0)
+; RV64-NEXT: sw a1, 4(a0)
; RV64-NEXT: sw a4, 120(a0)
; RV64-NEXT: ret
entry:
@@ -171,11 +163,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a5, a2
; RV64-NEXT: .LBB5_2: # %entry
-; RV64-NEXT: slli a2, a4, 3
-; RV64-NEXT: add a2, a0, a2
-; RV64-NEXT: sd a5, 0(a2)
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sd a5, 40(a0)
; RV64-NEXT: sd a5, 48(a0)
; RV64-NEXT: sd a4, 280(a0)
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index 5c71a3c5449940..d60fe73fb20bff 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -19,11 +19,9 @@ define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b) {
; RV32-LABEL: add_shl_moreOneUse_inStore:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a1, 5
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sw a2, 0(a4)
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a2, 20(a0)
; RV32-NEXT: sw a2, 24(a0)
; RV32-NEXT: sw a3, 140(a0)
; RV32-NEXT: ret
@@ -44,13 +42,11 @@ define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a1, 2047
; RV32-NEXT: addi a3, a3, 1
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: lui a4, 2
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: lui a1, 2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a4
+; RV32-NEXT: sw a2, 0(a0)
; RV32-NEXT: sw a3, 4(a0)
; RV32-NEXT: sw a2, 120(a0)
; RV32-NEXT: ret
@@ -75,11 +71,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a5, a2
; RV32-NEXT: .LBB3_2: # %entry
-; RV32-NEXT: slli a2, a4, 2
-; RV32-NEXT: add a2, a0, a2
-; RV32-NEXT: sw a5, 0(a2)
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 20(a0)
; RV32-NEXT: sw a5, 24(a0)
; RV32-NEXT: sw a4, 140(a0)
; RV32-NEXT: ret
@@ -107,13 +101,11 @@ define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a5, a2
; RV32-NEXT: .LBB4_2: # %entry
-; RV32-NEXT: slli a2, a4, 2
-; RV32-NEXT: add a2, a0, a2
-; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: lui a2, 2
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: lui a1, 2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: sw a5, 0(a0)
; RV32-NEXT: sw a5, 4(a0)
; RV32-NEXT: sw a4, 120(a0)
; RV32-NEXT: ret