[llvm] [DAG] Remove OneUse restriction when folding (shl (add x, c1), c2) (PR #101294)

via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 3 05:07:47 PDT 2024


https://github.com/LiqinWeng updated https://github.com/llvm/llvm-project/pull/101294

From 9ea6962dbb20d0d6fb6085589a37c7ce4095ce81 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sat, 3 Aug 2024 19:13:35 +0800
Subject: [PATCH 1/2] [Test] Pre-submit tests for #68972

---
 llvm/test/CodeGen/RISCV/add_shl_constant.ll | 62 +++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/add_shl_constant.ll

diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
new file mode 100644
index 0000000000000..59bfdfef8bed7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64 %s
+
+define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
+; RV32-LABEL: add_shl_oneUse:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    addi a0, a0, 984
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: add_shl_oneUse:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    addiw a0, a0, 984
+; RV64-NEXT:    ret
+  %add.0 = add i32 %x, 123
+  %shl = shl i32 %add.0, 3
+  %add.1 = add i32 %shl, %y
+  ret i32 %add.1
+}
+
+define void @add_shl_addmoreOneUse_in_store(ptr %array1, i32 %a, i32 %b) {
+; RV32-LABEL: add_shl_addmoreOneUse_in_store:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a1, 5
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    add a4, a0, a4
+; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a2, 24(a0)
+; RV32-NEXT:    sw a3, 140(a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: add_shl_addmoreOneUse_in_store:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addiw a3, a1, 5
+; RV64-NEXT:    slli a4, a3, 2
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sw a2, 24(a0)
+; RV64-NEXT:    sw a3, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %b, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx3 = getelementptr i8, ptr %0, i32 24
+  store i32 %b, ptr %arrayidx3
+  %arrayidx5 = getelementptr i8, ptr %0, i32 140
+  store i32 %add, ptr %arrayidx5
+  ret void
+}

From 8ece18ef34bd52b8785eba61a0e822598f98eecf Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sat, 3 Aug 2024 20:06:24 +0800
Subject: [PATCH 2/2] [DAGCombine] Remove OneUse restriction when folding (shl
 (add x, c1), c2)

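This combine rewrites (shl (add x, c1), c2) into
(add (shl x, c2), c1 << c2), but so far only when the add has a
single use. When the additional uses are store addresses, the target
can typically fold the shifted constant into the store's immediate
offset, so the transform remains profitable. A sketch of the intended
effect, taken from the updated add_shl_addmoreOneUse_in_store RV32
check lines below:

  Before:
    addi a3, a1, 5
    slli a4, a3, 2
    add  a4, a0, a4
    sw   a2, 0(a4)
    slli a1, a1, 2
    add  a0, a0, a1
    sw   a2, 24(a0)
    sw   a3, 140(a0)

  After:
    addi a3, a1, 5
    slli a1, a1, 2
    add  a0, a0, a1
    sw   a2, 20(a0)
    sw   a2, 24(a0)
    sw   a3, 140(a0)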
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  34 ++++--
 llvm/test/CodeGen/RISCV/add_shl_constant.ll   |   4 +-
 .../CodeGen/RISCV/riscv-shifted-extend.ll     | 113 +++++++-----------
 3 files changed, 67 insertions(+), 84 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b35d08b327ef3..5c9001f22cadc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10070,17 +10070,29 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // Variant of version done on multiply, except mul by a power of 2 is turned
   // into a shift.
   if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
-      N0->hasOneUse() && TLI.isDesirableToCommuteWithShift(N, Level)) {
-    SDValue N01 = N0.getOperand(1);
-    if (SDValue Shl1 =
-            DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
-      SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
-      AddToWorklist(Shl0.getNode());
-      SDNodeFlags Flags;
-      // Preserve the disjoint flag for Or.
-      if (N0.getOpcode() == ISD::OR && N0->getFlags().hasDisjoint())
-        Flags.setDisjoint(true);
-      return DAG.getNode(N0.getOpcode(), DL, VT, Shl0, Shl1, Flags);
+      TLI.isDesirableToCommuteWithShift(N, Level)) {
+    // Stores can fold a constant offset into their addressing mode, so if
+    // the add feeding this shift is also used as a store address, the
+    // fold above remains profitable despite the extra uses.
+    bool canOptAlways = false;
+    if (!N0.hasOneUse() && N0.getOpcode() == ISD::ADD) {
+      for (SDNode *Use : N0->uses())
+        if (isa<StoreSDNode>(Use))
+          canOptAlways = true;
+    }
+    if (N0.hasOneUse() || canOptAlways) {
+      SDValue N01 = N0.getOperand(1);
+      if (SDValue Shl1 =
+              DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
+        SDValue Shl0 =
+            DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1);
+        AddToWorklist(Shl0.getNode());
+        SDNodeFlags Flags;
+        // Preserve the disjoint flag for Or.
+        if (N0.getOpcode() == ISD::OR && N0->getFlags().hasDisjoint())
+          Flags.setDisjoint(true);
+        return DAG.getNode(N0.getOpcode(), DL, VT, Shl0, Shl1, Flags);
+      }
     }
   }
 
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index 59bfdfef8bed7..2808faf784f73 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -28,11 +28,9 @@ define void @add_shl_addmoreOneUse_in_store(ptr %array1, i32 %a, i32 %b) {
 ; RV32-LABEL: add_shl_addmoreOneUse_in_store:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a3, a1, 5
-; RV32-NEXT:    slli a4, a3, 2
-; RV32-NEXT:    add a4, a0, a4
-; RV32-NEXT:    sw a2, 0(a4)
 ; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a2, 20(a0)
 ; RV32-NEXT:    sw a2, 24(a0)
 ; RV32-NEXT:    sw a3, 140(a0)
 ; RV32-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll b/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
index 957f44f9f669d..12734e9f8ba89 100644
--- a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
@@ -2,123 +2,96 @@
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64 %s
 
-define void @test(ptr nocapture noundef writeonly %array1, i32 noundef signext %a, i32 noundef signext %b) {
-; RV64-LABEL: test:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a3, a1, 5
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    sw a2, 0(a4)
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sw a2, 24(a0)
-; RV64-NEXT:    sw a3, 140(a0)
-; RV64-NEXT:    ret
+define void @test(ptr %ptr, i32 %a, i32 %b) {
+entry:
+  %add = add nsw i32 %a, 10
+  %idx = sext i32 %add to i64
+  %gep = getelementptr inbounds i32, ptr %ptr, i64 %idx
+  store i32 %b, ptr %gep
+  ret void
+}
+
+define i32 @test_add_res_moreoneuse(ptr %ptr, i32 %a, i32 %b) {
+entry:
+  %add = add nsw i32 %a, 5
+  %idx = sext i32 %add to i64
+  %gep = getelementptr inbounds i32, ptr %ptr, i64 %idx
+  store i32 %b, ptr %gep
+  ret i32 %add
+}
+
+define void @test_addop_nonsw_flag(ptr %ptr, i32 %a, i32 %b) {
+entry:
+  %add = add i32 %a, 10
+  %idx = sext i32 %add to i64
+  %gep = getelementptr inbounds i32, ptr %ptr, i64 %idx
+  store i32 %b, ptr %gep
+  ret void
+}
+
+define void @add_shl_add_use_moreoneuse(ptr %array1, i32 %a, i32 %b) {
 entry:
   %add = add nsw i32 %a, 5
   %idxprom = sext i32 %add to i64
   %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
-  store i32 %b, ptr %arrayidx, align 4
+  store i32 %b, ptr %arrayidx
   %add3 = add nsw i32 %a, 6
   %idxprom4 = sext i32 %add3 to i64
   %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
-  store i32 %b, ptr %arrayidx5, align 4
+  store i32 %b, ptr %arrayidx5
   %add6 = add nsw i32 %a, 35
   %idxprom7 = sext i32 %add6 to i64
   %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
-  store i32 %add, ptr %arrayidx8, align 4
+  store i32 %add, ptr %arrayidx8
   ret void
 }
 
 ; Test with a branch: the add's result has more than one use, but the
 ; address computation can still be simplified.
-define void @test1(ptr nocapture noundef %array1, i32 noundef signext %a, i32 noundef signext %b, i32 noundef signext %x) {
-; RV64-LABEL: test1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a4, a1, 5
-; RV64-NEXT:    slli a5, a4, 2
-; RV64-NEXT:    add a5, a0, a5
-; RV64-NEXT:    mv a6, a4
-; RV64-NEXT:    bgtz a3, .LBB1_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a6, a2
-; RV64-NEXT:  .LBB1_2: # %entry
-; RV64-NEXT:    sw a6, 0(a5)
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sw a6, 24(a0)
-; RV64-NEXT:    sw a4, 140(a0)
-; RV64-NEXT:    ret
+define void @test1(ptr %array1, i32 %a, i32 %b, i32 %x) {
 entry:
   %add = add nsw i32 %a, 5
   %cmp = icmp sgt i32 %x, 0
   %idxprom = sext i32 %add to i64
   %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
   %add.b = select i1 %cmp, i32 %add, i32 %b
-  store i32 %add.b, ptr %arrayidx, align 4
+  store i32 %add.b, ptr %arrayidx
   %add5 = add nsw i32 %a, 6
   %idxprom6 = sext i32 %add5 to i64
   %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
-  store i32 %add.b, ptr %arrayidx7, align 4
+  store i32 %add.b, ptr %arrayidx7
   %add8 = add nsw i32 %a, 35
   %idxprom9 = sext i32 %add8 to i64
   %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
-  store i32 %add, ptr %arrayidx10, align 4
+  store i32 %add, ptr %arrayidx10
   ret void
 }
 
-define void @test2(ptr nocapture noundef writeonly %array1, i64 noundef %a, i64 noundef %b) local_unnamed_addr #0 {
-; RV64-LABEL: test2:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a3, a1, 5
-; RV64-NEXT:    slli a4, a3, 3
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    sd a2, 0(a4)
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sd a2, 48(a0)
-; RV64-NEXT:    sd a3, 280(a0)
-; RV64-NEXT:    ret
+define void @test2(ptr %array1, i64 %a, i64 %b) {
 entry:
   %add = add nsw i64 %a, 5
   %arrayidx = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %b, ptr %arrayidx, align 8
+  store i64 %b, ptr %arrayidx
   %add2 = add nsw i64 %a, 6
   %arrayidx3 = getelementptr inbounds i64, ptr %array1, i64 %add2
-  store i64 %b, ptr %arrayidx3, align 8
+  store i64 %b, ptr %arrayidx3
   %add4 = add nsw i64 %a, 35
   %arrayidx5 = getelementptr inbounds i64, ptr %array1, i64 %add4
-  store i64 %add, ptr %arrayidx5, align 8
+  store i64 %add, ptr %arrayidx5
   ret void
 }
 
-define void @test3(ptr nocapture noundef %array1, i64 noundef %a, i64 noundef %b, i64 noundef %x) {
-; RV64-LABEL: test3:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a4, a1, 5
-; RV64-NEXT:    mv a5, a4
-; RV64-NEXT:    bgtz a3, .LBB3_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a5, a2
-; RV64-NEXT:  .LBB3_2: # %entry
-; RV64-NEXT:    slli a2, a4, 3
-; RV64-NEXT:    add a2, a0, a2
-; RV64-NEXT:    sd a5, 0(a2)
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sd a5, 48(a0)
-; RV64-NEXT:    sd a4, 280(a0)
-; RV64-NEXT:    ret
+define void @test3(ptr %array1, i64 %a, i64 %b, i64 %x) {
 entry:
   %add = add nsw i64 %a, 5
   %cmp = icmp sgt i64 %x, 0
   %spec.select = select i1 %cmp, i64 %add, i64 %b
   %0 = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %spec.select, ptr %0, align 8
+  store i64 %spec.select, ptr %0
   %add3 = add nsw i64 %a, 6
   %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
-  store i64 %spec.select, ptr %arrayidx4, align 8
+  store i64 %spec.select, ptr %arrayidx4
   %add5 = add nsw i64 %a, 35
   %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
-  store i64 %add, ptr %arrayidx6, align 8
+  store i64 %add, ptr %arrayidx6
   ret void
 }
