[llvm] [DAGCombine] Remove one-use restrictions for RISC-V when folding (shl (add_nsw x, c1), c2) and (shl (sext (add x, c1)), c2) in some scenarios (PR #101294)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 2 20:53:08 PST 2024


https://github.com/LiqinWeng updated https://github.com/llvm/llvm-project/pull/101294

>From 93887495f0bffb920ab5a29cbe9d4b5cbaf4760c Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sat, 3 Aug 2024 19:13:35 +0800
Subject: [PATCH 1/9] [Test] Pre-submit tests for #101294

---
 .../CodeGen/RISCV/add_sext_shl_constant.ll    | 195 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/add_shl_constant.ll   | 132 ++++++++++++
 .../CodeGen/RISCV/riscv-shifted-extend.ll     | 124 -----------
 3 files changed, 327 insertions(+), 124 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
 create mode 100644 llvm/test/CodeGen/RISCV/add_shl_constant.ll
 delete mode 100644 llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll

diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
new file mode 100644
index 00000000000000..35f3656e868681
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64 %s
+
+define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addiw a3, a1, 5
+; RV64-NEXT:    slli a4, a3, 2
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 24(a0)
+; RV64-NEXT:    sw a3, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %add3 = add nsw i32 %a, 6
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+  store i32 %b, ptr %arrayidx5
+  %add6 = add nsw i32 %a, 35
+  %idxprom7 = sext i32 %add6 to i64
+  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+  store i32 %add, ptr %arrayidx8
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a3, a1, 2047
+; RV64-NEXT:    addiw a3, a3, 1
+; RV64-NEXT:    slli a4, a3, 2
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    add a0, a0, a4
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sw a3, 4(a0)
+; RV64-NEXT:    sw a2, 120(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %0 = sext i32 %a to i64
+  %1 = getelementptr i32, ptr %array1, i64 %0
+  %arrayidx3 = getelementptr i8, ptr %1, i64 8196
+  store i32 %add, ptr %arrayidx3
+  %arrayidx6 = getelementptr i8, ptr %1, i64 8312
+  store i32 %b, ptr %arrayidx6
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_sext:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addiw a3, a1, 5
+; RV64-NEXT:    slli a4, a3, 2
+; RV64-NEXT:    add a4, a0, a4
+; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 24(a0)
+; RV64-NEXT:    sd a3, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %add3 = add nsw i32 %a, 6
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+  store i32 %b, ptr %arrayidx5
+  %add6 = add nsw i32 %a, 35
+  %idxprom7 = sext i32 %add6 to i64
+  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+  store i64 %idxprom, ptr %arrayidx8
+  ret void
+}
+
+; Test with a branch: the add has more than one use but can still be folded.
+define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext  %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addiw a4, a1, 5
+; RV64-NEXT:    slli a5, a4, 2
+; RV64-NEXT:    add a5, a0, a5
+; RV64-NEXT:    mv a6, a4
+; RV64-NEXT:    bgtz a3, .LBB3_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a6, a2
+; RV64-NEXT:  .LBB3_2: # %entry
+; RV64-NEXT:    sw a6, 0(a5)
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a6, 24(a0)
+; RV64-NEXT:    sw a4, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %cmp = icmp sgt i32 %x, 0
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  %add.b = select i1 %cmp, i32 %add, i32 %b
+  store i32 %add.b, ptr %arrayidx
+  %add5 = add nsw i32 %a, 6
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
+  store i32 %add.b, ptr %arrayidx7
+  %add8 = add nsw i32 %a, 35
+  %idxprom9 = sext i32 %add8 to i64
+  %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
+  store i32 %add, ptr %arrayidx10
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i32 signext  %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a4, a1, 2047
+; RV64-NEXT:    addiw a4, a4, 1
+; RV64-NEXT:    slli a6, a4, 2
+; RV64-NEXT:    add a6, a0, a6
+; RV64-NEXT:    mv a5, a4
+; RV64-NEXT:    bgtz a3, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a5, a2
+; RV64-NEXT:  .LBB4_2: # %entry
+; RV64-NEXT:    sw a5, 0(a6)
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    lui a1, 2
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sw a5, 4(a0)
+; RV64-NEXT:    sw a4, 120(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %cmp = icmp sgt i32 %x, 0
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  %add.b = select i1 %cmp, i32 %add, i32 %b
+  store i32 %add.b, ptr %arrayidx
+  %0 = sext i32 %a to i64
+  %1 = getelementptr i32, ptr %array1, i64 %0
+  %arrayidx7 = getelementptr i8, ptr %1, i64 8196
+  store i32 %add.b, ptr %arrayidx7
+  %arrayidx10 = getelementptr i8, ptr %1, i64 8312
+  store i32 %add, ptr %arrayidx10
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
+; RV64-LABEL: add_shl_moreOneUse_inSelect:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a4, a1, 5
+; RV64-NEXT:    mv a5, a4
+; RV64-NEXT:    bgtz a3, .LBB5_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a5, a2
+; RV64-NEXT:  .LBB5_2: # %entry
+; RV64-NEXT:    slli a2, a4, 3
+; RV64-NEXT:    add a2, a0, a2
+; RV64-NEXT:    sd a5, 0(a2)
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sd a5, 48(a0)
+; RV64-NEXT:    sd a4, 280(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i64 %a, 5
+  %cmp = icmp sgt i64 %x, 0
+  %spec.select = select i1 %cmp, i64 %add, i64 %b
+  %0 = getelementptr inbounds i64, ptr %array1, i64 %add
+  store i64 %spec.select, ptr %0
+  %add3 = add nsw i64 %a, 6
+  %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
+  store i64 %spec.select, ptr %arrayidx4
+  %add5 = add nsw i64 %a, 35
+  %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
+  store i64 %add, ptr %arrayidx6
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
new file mode 100644
index 00000000000000..5c71a3c5449940
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32 %s
+
+define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
+; RV32-LABEL: add_shl_oneUse:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    addi a0, a0, 984
+; RV32-NEXT:    ret
+  %add.0 = add i32 %x, 123
+  %shl = shl i32 %add.0, 3
+  %add.1 = add i32 %shl, %y
+  ret i32 %add.1
+}
+
+define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b)  {
+; RV32-LABEL: add_shl_moreOneUse_inStore:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a1, 5
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    add a4, a0, a4
+; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a2, 24(a0)
+; RV32-NEXT:    sw a3, 140(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %b, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx3 = getelementptr i8, ptr %0, i32 24
+  store i32 %b, ptr %arrayidx3
+  %arrayidx5 = getelementptr i8, ptr %0, i32 140
+  store i32 %add, ptr %arrayidx5
+  ret void
+}
+
+define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32 %b)  {
+; RV32-LABEL: add_shl_moreOneUse_inStore_addexceedsign12:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a1, 2047
+; RV32-NEXT:    addi a3, a3, 1
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    add a4, a0, a4
+; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    lui a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a3, 4(a0)
+; RV32-NEXT:    sw a2, 120(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %b, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx2 = getelementptr i8, ptr %0, i32 8196
+  store i32 %add, ptr %arrayidx2
+  %arrayidx4 = getelementptr i8, ptr %0, i32 8312
+  store i32 %b, ptr %arrayidx4
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a4, a1, 5
+; RV32-NEXT:    mv a5, a4
+; RV32-NEXT:    bgtz a3, .LBB3_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a5, a2
+; RV32-NEXT:  .LBB3_2: # %entry
+; RV32-NEXT:    slli a2, a4, 2
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sw a5, 0(a2)
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a5, 24(a0)
+; RV32-NEXT:    sw a4, 140(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %cmp = icmp sgt i32 %x, 0
+  %cond = select i1 %cmp, i32 %add, i32 %b
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %cond, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx2 = getelementptr i32, ptr %0, i32 6
+  store i32 %cond, ptr %arrayidx2
+  %arrayidx4 = getelementptr i32, ptr %0, i32 35
+  store i32 %add, ptr %arrayidx4
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect_addexceedsign12:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a4, a1, 2047
+; RV32-NEXT:    addi a4, a4, 1
+; RV32-NEXT:    mv a5, a4
+; RV32-NEXT:    bgtz a3, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a5, a2
+; RV32-NEXT:  .LBB4_2: # %entry
+; RV32-NEXT:    slli a2, a4, 2
+; RV32-NEXT:    add a2, a0, a2
+; RV32-NEXT:    sw a5, 0(a2)
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    lui a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a5, 4(a0)
+; RV32-NEXT:    sw a4, 120(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %cmp = icmp sgt i32 %x, 0
+  %spec.select = select i1 %cmp, i32 %add, i32 %b
+  %0 = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %spec.select, ptr %0, align 4
+  %1 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx4 = getelementptr i8, ptr %1, i32 8196
+  store i32 %spec.select, ptr %arrayidx4
+  %arrayidx6 = getelementptr i8, ptr %1, i32 8312
+  store i32 %add, ptr %arrayidx6
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll b/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
deleted file mode 100644
index c1e7b682200eb1..00000000000000
--- a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64 %s
-
-define void @test(ptr nocapture noundef writeonly %array1, i32 noundef signext %a, i32 noundef signext %b) {
-; RV64-LABEL: test:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a3, a1, 5
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a1, a1, a0
-; RV64-NEXT:    add a0, a0, a4
-; RV64-NEXT:    sw a2, 0(a0)
-; RV64-NEXT:    sw a2, 24(a1)
-; RV64-NEXT:    sw a3, 140(a1)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i32 %a, 5
-  %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
-  store i32 %b, ptr %arrayidx, align 4
-  %add3 = add nsw i32 %a, 6
-  %idxprom4 = sext i32 %add3 to i64
-  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
-  store i32 %b, ptr %arrayidx5, align 4
-  %add6 = add nsw i32 %a, 35
-  %idxprom7 = sext i32 %add6 to i64
-  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
-  store i32 %add, ptr %arrayidx8, align 4
-  ret void
-}
-
-; test of jumpping, find add's operand has one more use can simplified
-define void @test1(ptr nocapture noundef %array1, i32 noundef signext %a, i32 noundef signext %b, i32 noundef signext %x) {
-; RV64-LABEL: test1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a4, a1, 5
-; RV64-NEXT:    slli a5, a4, 2
-; RV64-NEXT:    add a5, a0, a5
-; RV64-NEXT:    mv a6, a4
-; RV64-NEXT:    bgtz a3, .LBB1_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a6, a2
-; RV64-NEXT:  .LBB1_2: # %entry
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sw a6, 0(a5)
-; RV64-NEXT:    sw a6, 24(a0)
-; RV64-NEXT:    sw a4, 140(a0)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i32 %a, 5
-  %cmp = icmp sgt i32 %x, 0
-  %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
-  %add.b = select i1 %cmp, i32 %add, i32 %b
-  store i32 %add.b, ptr %arrayidx, align 4
-  %add5 = add nsw i32 %a, 6
-  %idxprom6 = sext i32 %add5 to i64
-  %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
-  store i32 %add.b, ptr %arrayidx7, align 4
-  %add8 = add nsw i32 %a, 35
-  %idxprom9 = sext i32 %add8 to i64
-  %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
-  store i32 %add, ptr %arrayidx10, align 4
-  ret void
-}
-
-define void @test2(ptr nocapture noundef writeonly %array1, i64 noundef %a, i64 noundef %b) local_unnamed_addr #0 {
-; RV64-LABEL: test2:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a3, a1, 5
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    slli a4, a3, 3
-; RV64-NEXT:    add a1, a1, a0
-; RV64-NEXT:    add a0, a0, a4
-; RV64-NEXT:    sd a2, 0(a0)
-; RV64-NEXT:    sd a2, 48(a1)
-; RV64-NEXT:    sd a3, 280(a1)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i64 %a, 5
-  %arrayidx = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %b, ptr %arrayidx, align 8
-  %add2 = add nsw i64 %a, 6
-  %arrayidx3 = getelementptr inbounds i64, ptr %array1, i64 %add2
-  store i64 %b, ptr %arrayidx3, align 8
-  %add4 = add nsw i64 %a, 35
-  %arrayidx5 = getelementptr inbounds i64, ptr %array1, i64 %add4
-  store i64 %add, ptr %arrayidx5, align 8
-  ret void
-}
-
-define void @test3(ptr nocapture noundef %array1, i64 noundef %a, i64 noundef %b, i64 noundef %x) {
-; RV64-LABEL: test3:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a4, a1, 5
-; RV64-NEXT:    mv a5, a4
-; RV64-NEXT:    bgtz a3, .LBB3_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a5, a2
-; RV64-NEXT:  .LBB3_2: # %entry
-; RV64-NEXT:    slli a2, a4, 3
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    add a2, a0, a2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sd a5, 0(a2)
-; RV64-NEXT:    sd a5, 48(a0)
-; RV64-NEXT:    sd a4, 280(a0)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i64 %a, 5
-  %cmp = icmp sgt i64 %x, 0
-  %spec.select = select i1 %cmp, i64 %add, i64 %b
-  %0 = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %spec.select, ptr %0, align 8
-  %add3 = add nsw i64 %a, 6
-  %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
-  store i64 %spec.select, ptr %arrayidx4, align 8
-  %add5 = add nsw i64 %a, 35
-  %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
-  store i64 %add, ptr %arrayidx6, align 8
-  ret void
-}

>From fd31fe5b25c6bee0e289f81c85130e5a66583097 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Fri, 9 Aug 2024 13:08:36 +0800
Subject: [PATCH 2/9] [DAGCombine] Remove OneUse restriction when folding (shl
 (add x, c1), c2) and (shl (sext (add x, c1)), c2)

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  5 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    |  5 ++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  7 +++
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  6 ++
 .../Target/Hexagon/HexagonISelLowering.cpp    | 15 +++++
 llvm/lib/Target/Hexagon/HexagonISelLowering.h |  3 +
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   | 15 +++++
 llvm/lib/Target/PowerPC/PPCISelLowering.h     |  3 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 48 ++++++++++++++++
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 16 ++++++
 llvm/lib/Target/X86/X86ISelLowering.h         |  3 +
 .../CodeGen/RISCV/add_sext_shl_constant.ll    | 56 ++++++++-----------
 llvm/test/CodeGen/RISCV/add_shl_constant.ll   | 24 +++-----
 13 files changed, 153 insertions(+), 53 deletions(-)
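
The folds in question: (shl (add x, c1), c2) becomes (add (shl x, c2), c1 << c2), and, because the add is nsw, the sign-extended form (shl (sext (add x, c1)), c2) becomes (add (shl (sext x), c2), (sext c1) << c2), letting the shifted constant fold into an addressing mode. A minimal IR sketch of the multi-use shape this patch unblocks (illustrative only; the committed coverage is in the tests below):

define void @sketch(ptr %array, i32 %a, i32 %b) {
entry:
  ; %add has two users (the index computation and the final store), so the
  ; old one-use check in visitSHL rejected the fold above. With c1 = 5 and
  ; the GEP scaling giving c2 = 2, the constant becomes the offset 20.
  %add = add nsw i32 %a, 5
  %idx = sext i32 %add to i64
  %gep = getelementptr inbounds i32, ptr %array, i64 %idx
  store i32 %b, ptr %gep
  store i32 %add, ptr %array
  ret void
}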

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 521829675ae7c3..86d2340952c5e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10230,7 +10230,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // Variant of version done on multiply, except mul by a power of 2 is turned
   // into a shift.
   if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
-      N0->hasOneUse() && TLI.isDesirableToCommuteWithShift(N, Level)) {
+      TLI.isDesirableToCommuteWithShift(N, Level)) {
     SDValue N01 = N0.getOperand(1);
     if (SDValue Shl1 =
             DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
@@ -10249,8 +10249,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // TODO: Should we limit this with isLegalAddImmediate?
   if (N0.getOpcode() == ISD::SIGN_EXTEND &&
       N0.getOperand(0).getOpcode() == ISD::ADD &&
-      N0.getOperand(0)->getFlags().hasNoSignedWrap() && N0->hasOneUse() &&
-      N0.getOperand(0)->hasOneUse() &&
+      N0.getOperand(0)->getFlags().hasNoSignedWrap() &&
       TLI.isDesirableToCommuteWithShift(N, Level)) {
     SDValue Add = N0.getOperand(0);
     SDLoc DL(N0);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e1be825fcf7bf3..5b6f20d3d4083e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17931,6 +17931,11 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   SDValue ShiftLHS = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+      !ShiftLHS->hasOneUse())
+    return false;
+
   // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
   // combine it with shift 'N' to let it be lowered to UBFX except:
   // ((x >> C) & mask) << C.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d77508227b076b..56f157b27904e7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1072,6 +1072,13 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
+
+  SDValue ShiftLHS = N->getOperand(0);
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+      !ShiftLHS->hasOneUse())
+    return false;
+
   // Always commute pre-type legalization and right shifts.
   // We're looking for shl(or(x,y),z) patterns.
   if (Level < CombineLevel::AfterLegalizeTypes ||
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 6b290135c5bcba..bf81a36171a3b7 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13880,6 +13880,12 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
+  SDValue ShiftLHS = N->getOperand(0);
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+      !ShiftLHS->hasOneUse())
+    return false;
+
   if (Level == BeforeLegalizeTypes)
     return true;
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 2c8d141aa21080..43b3d8d108c4f2 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2152,6 +2152,21 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
   return X.getValueType().isScalarInteger(); // 'tstbit'
 }
 
+bool HexagonTargetLowering::isDesirableToCommuteWithShift(
+    const SDNode *N, CombineLevel Level) const {
+  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+          N->getOpcode() == ISD::SRL) &&
+         "Expected shift op");
+
+  SDValue ShiftLHS = N->getOperand(0);
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+      !ShiftLHS->hasOneUse())
+    return false;
+
+  return true;
+}
+
 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
 }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 3fd961f5a74623..a6bd57630031c4 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -155,6 +155,9 @@ class HexagonTargetLowering : public TargetLowering {
 
   bool hasBitTest(SDValue X, SDValue Y) const override;
 
+  bool isDesirableToCommuteWithShift(const SDNode *N,
+                                     CombineLevel Level) const override;
+
   bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
 
   /// Return true if an FMA operation is faster than a pair of mul and add
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 93a48ce2b8c72d..1fcada1aac6bb7 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19166,3 +19166,18 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
   return Builder.CreateOr(
       Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
 }
+
+bool PPCTargetLowering::isDesirableToCommuteWithShift(
+    const SDNode *N, CombineLevel Level) const {
+  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+          N->getOpcode() == ISD::SRL) &&
+         "Expected shift op");
+
+  SDValue ShiftLHS = N->getOperand(0);
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+      !ShiftLHS->hasOneUse())
+    return false;
+
+  return true;
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 1fd4b83d6c1192..784ee8f5089e04 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1497,6 +1497,9 @@ namespace llvm {
     /// through to determine the optimal load/store instruction format.
     unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                             SelectionDAG &DAG) const;
+
+    bool isDesirableToCommuteWithShift(const SDNode *N,
+                                       CombineLevel Level) const override;
   }; // end class PPCTargetLowering
 
   namespace PPC {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 78dc3cb27a6988..e897c43697b586 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18151,8 +18151,46 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
   SDValue N0 = N->getOperand(0);
   EVT Ty = N0.getValueType();
+
+  // Loads and stores can fold a constant offset into their addressing, so
+  // if the add is only used by memory operations, the folding above can
+  // still be performed.
+  auto isLDST = [&]() {
+    bool canOptAwlays = false;
+    if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+      for (SDNode *Use : N0->uses()) {
+        // This use is the one we're on right now. Skip it
+        if (Use == N || Use->getOpcode() == ISD::SELECT)
+          continue;
+        if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+          canOptAwlays = false;
+          break;
+        }
+        canOptAwlays = true;
+      }
+    }
+
+    if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+        !N0->getOperand(0)->hasOneUse()) {
+      for (SDNode *Use : N0->getOperand(0)->uses()) {
+        // This use is the one we're on right now. Skip it
+        if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
+          continue;
+        if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+          canOptAwlays = false;
+          break;
+        }
+        canOptAwlays = true;
+      }
+    }
+    return canOptAwlays;
+  };
+
   if (Ty.isScalarInteger() &&
       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
+    if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+      return isLDST();
+    }
     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
     if (C1 && C2) {
@@ -18187,6 +18225,16 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
         return false;
     }
   }
+
+  if ((N0->getOpcode() == ISD::ADD || N0->getOpcode() == ISD::OR) &&
+      !N0->hasOneUse())
+    return false;
+
+  if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+      N0->getOperand(0)->getOpcode() == ISD::ADD &&
+      !(N0->hasOneUse() && N0->getOperand(0)->hasOneUse()))
+    return isLDST();
+
   return true;
 }
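
The heuristic above is worth spelling out: even when the add (or the add under the sext) has more than one use, commuting is still profitable if every other user besides the shift (and any SELECT, which is skipped) is a load or store, since those users can take the value directly while the shifted constant folds into their immediate offset. An illustrative IR shape (simplified from the tests in this patch; names are made up):

define void @ldst_users(ptr %array, i64 %a, i64 %b) {
entry:
  ; Besides feeding the scaled index (the shl being combined), %add is only
  ; used as a stored value, so isLDST() returns true and the fold fires:
  ; the constant 5*8 becomes the sd offset 40.
  %add = add nsw i64 %a, 5
  %gep = getelementptr inbounds i64, ptr %array, i64 %add
  store i64 %b, ptr %gep
  store i64 %add, ptr %array
  ret void
}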
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d490de06590f78..a1490e6b6b4e4c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60673,3 +60673,19 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
   return TargetLowering::getPrefLoopAlignment();
 }
+
+bool X86TargetLowering::isDesirableToCommuteWithShift(
+    const SDNode *N, CombineLevel Level) const {
+  using namespace llvm::SDPatternMatch;
+  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+          N->getOpcode() == ISD::SRL) &&
+         "Expected shift op");
+
+  SDValue ShiftLHS = N->getOperand(0);
+  SDValue Add;
+  if (sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))) ||
+      ShiftLHS->hasOneUse())
+    return true;
+
+  return false;
+}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 2db25d6dda061a..0f61bdabafbd5c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1901,6 +1901,9 @@ namespace llvm {
 
     SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                     SDValue V2) const;
+
+    bool isDesirableToCommuteWithShift(const SDNode *N,
+                                       CombineLevel Level) const override;
   };
 
   namespace X86 {
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 35f3656e868681..891230c10cc460 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -5,13 +5,11 @@
 define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
 ; RV64-LABEL: add_sext_shl_moreOneUse_add:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a3, a1, 5
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    addi a3, a1, 5
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    slli a1, a1, 2
 ; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 20(a0)
 ; RV64-NEXT:    sw a2, 24(a0)
 ; RV64-NEXT:    sw a3, 140(a0)
 ; RV64-NEXT:    ret
@@ -35,15 +33,13 @@ define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b
 ; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a3, a1, 2047
-; RV64-NEXT:    addiw a3, a3, 1
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    sw a2, 0(a4)
+; RV64-NEXT:    addi a3, a3, 1
+; RV64-NEXT:    lui a4, 2
 ; RV64-NEXT:    sext.w a1, a1
 ; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    lui a4, 2
 ; RV64-NEXT:    add a0, a0, a4
 ; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sw a2, 0(a0)
 ; RV64-NEXT:    sw a3, 4(a0)
 ; RV64-NEXT:    sw a2, 120(a0)
 ; RV64-NEXT:    ret
@@ -64,13 +60,11 @@ entry:
 define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
 ; RV64-LABEL: add_sext_shl_moreOneUse_sext:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a3, a1, 5
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a4, a0, a4
-; RV64-NEXT:    sw a2, 0(a4)
 ; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    addi a3, a1, 5
 ; RV64-NEXT:    slli a1, a1, 2
 ; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 20(a0)
 ; RV64-NEXT:    sw a2, 24(a0)
 ; RV64-NEXT:    sd a3, 140(a0)
 ; RV64-NEXT:    ret
@@ -94,18 +88,16 @@ entry:
 define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext  %a, i32 %b, i32 signext %x) {
 ; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a4, a1, 5
-; RV64-NEXT:    slli a5, a4, 2
-; RV64-NEXT:    add a5, a0, a5
-; RV64-NEXT:    mv a6, a4
+; RV64-NEXT:    addi a4, a1, 5
+; RV64-NEXT:    mv a5, a4
 ; RV64-NEXT:    bgtz a3, .LBB3_2
 ; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a6, a2
+; RV64-NEXT:    mv a5, a2
 ; RV64-NEXT:  .LBB3_2: # %entry
-; RV64-NEXT:    sw a6, 0(a5)
 ; RV64-NEXT:    slli a1, a1, 2
 ; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sw a6, 24(a0)
+; RV64-NEXT:    sw a5, 20(a0)
+; RV64-NEXT:    sw a5, 24(a0)
 ; RV64-NEXT:    sw a4, 140(a0)
 ; RV64-NEXT:    ret
 entry:
@@ -130,20 +122,18 @@ define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i
 ; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a4, a1, 2047
-; RV64-NEXT:    addiw a4, a4, 1
-; RV64-NEXT:    slli a6, a4, 2
-; RV64-NEXT:    add a6, a0, a6
-; RV64-NEXT:    mv a5, a4
+; RV64-NEXT:    addi a4, a4, 1
+; RV64-NEXT:    lui a5, 2
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    add a0, a0, a5
+; RV64-NEXT:    mv a1, a4
 ; RV64-NEXT:    bgtz a3, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a5, a2
+; RV64-NEXT:    mv a1, a2
 ; RV64-NEXT:  .LBB4_2: # %entry
-; RV64-NEXT:    sw a5, 0(a6)
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    lui a1, 2
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    sw a5, 4(a0)
+; RV64-NEXT:    sw a1, 0(a0)
+; RV64-NEXT:    sw a1, 4(a0)
 ; RV64-NEXT:    sw a4, 120(a0)
 ; RV64-NEXT:    ret
 entry:
@@ -171,11 +161,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a5, a2
 ; RV64-NEXT:  .LBB5_2: # %entry
-; RV64-NEXT:    slli a2, a4, 3
-; RV64-NEXT:    add a2, a0, a2
-; RV64-NEXT:    sd a5, 0(a2)
 ; RV64-NEXT:    slli a1, a1, 3
 ; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sd a5, 40(a0)
 ; RV64-NEXT:    sd a5, 48(a0)
 ; RV64-NEXT:    sd a4, 280(a0)
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index 5c71a3c5449940..d60fe73fb20bff 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -19,11 +19,9 @@ define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b)  {
 ; RV32-LABEL: add_shl_moreOneUse_inStore:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a3, a1, 5
-; RV32-NEXT:    slli a4, a3, 2
-; RV32-NEXT:    add a4, a0, a4
-; RV32-NEXT:    sw a2, 0(a4)
 ; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a2, 20(a0)
 ; RV32-NEXT:    sw a2, 24(a0)
 ; RV32-NEXT:    sw a3, 140(a0)
 ; RV32-NEXT:    ret
@@ -44,13 +42,11 @@ define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a3, a1, 2047
 ; RV32-NEXT:    addi a3, a3, 1
-; RV32-NEXT:    slli a4, a3, 2
-; RV32-NEXT:    add a4, a0, a4
-; RV32-NEXT:    sw a2, 0(a4)
+; RV32-NEXT:    lui a4, 2
 ; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a0, a0, a1
-; RV32-NEXT:    lui a1, 2
-; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    add a0, a0, a4
+; RV32-NEXT:    sw a2, 0(a0)
 ; RV32-NEXT:    sw a3, 4(a0)
 ; RV32-NEXT:    sw a2, 120(a0)
 ; RV32-NEXT:    ret
@@ -75,11 +71,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a5, a2
 ; RV32-NEXT:  .LBB3_2: # %entry
-; RV32-NEXT:    slli a2, a4, 2
-; RV32-NEXT:    add a2, a0, a2
-; RV32-NEXT:    sw a5, 0(a2)
 ; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a5, 20(a0)
 ; RV32-NEXT:    sw a5, 24(a0)
 ; RV32-NEXT:    sw a4, 140(a0)
 ; RV32-NEXT:    ret
@@ -107,13 +101,11 @@ define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i3
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a5, a2
 ; RV32-NEXT:  .LBB4_2: # %entry
-; RV32-NEXT:    slli a2, a4, 2
-; RV32-NEXT:    add a2, a0, a2
-; RV32-NEXT:    sw a5, 0(a2)
+; RV32-NEXT:    lui a2, 2
 ; RV32-NEXT:    slli a1, a1, 2
 ; RV32-NEXT:    add a0, a0, a1
-; RV32-NEXT:    lui a1, 2
-; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    sw a5, 0(a0)
 ; RV32-NEXT:    sw a5, 4(a0)
 ; RV32-NEXT:    sw a4, 120(a0)
 ; RV32-NEXT:    ret

>From 074a6f5c652738e1422a96a616f14d2e678d09a3 Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Fri, 1 Nov 2024 10:41:19 +0800
Subject: [PATCH 3/9] fix the comments

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp |  8 ++++++--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp   |  6 ++++--
 llvm/lib/Target/ARM/ARMISelLowering.cpp         |  6 ++++--
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 12 +++++++-----
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp     | 12 +++++++-----
 llvm/lib/Target/X86/X86ISelLowering.cpp         |  5 +++--
 6 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5b6f20d3d4083e..cc7628acba8d2a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17930,10 +17930,13 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
 
   SDValue ShiftLHS = N->getOperand(0);
   EVT VT = N->getValueType(0);
+  SDValue Add;
+
+  if (!ShiftLHS->hasOneUse())
+    return false;
 
   if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
-      !ShiftLHS->hasOneUse())
+       !ShiftLHS.getOperand(0)->hasOneUse()))
     return false;
 
   // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
@@ -17954,6 +17957,7 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
       }
     }
   }
+
   return true;
 }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 56f157b27904e7..782d6c656e5e1e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1074,9 +1074,11 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
+  if (!ShiftLHS->hasOneUse())
+    return false;
+
   if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
-      !ShiftLHS->hasOneUse())
+       !ShiftLHS.getOperand(0)->hasOneUse()))
     return false;
 
   // Always commute pre-type legalization and right shifts.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index bf81a36171a3b7..a5e993319f9382 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13881,9 +13881,11 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
+  if (!ShiftLHS->hasOneUse())
+    return false;
+
   if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
-      !ShiftLHS->hasOneUse())
+       !ShiftLHS.getOperand(0)->hasOneUse()))
     return false;
 
   if (Level == BeforeLegalizeTypes)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 43b3d8d108c4f2..8db875b4adc252 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2154,17 +2154,19 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
 
 bool HexagonTargetLowering::isDesirableToCommuteWithShift(
     const SDNode *N, CombineLevel Level) const {
+  using namespace llvm::SDPatternMatch;
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
-      !ShiftLHS->hasOneUse())
-    return false;
+  SDValue Add;
 
-  return true;
+  if (ShiftLHS->hasOneUse() ||
+      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
+    return true;
+
+  return false;
 }
 
 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1fcada1aac6bb7..cc6a02553bc885 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19169,15 +19169,17 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
 
 bool PPCTargetLowering::isDesirableToCommuteWithShift(
     const SDNode *N, CombineLevel Level) const {
+  using namespace llvm::SDPatternMatch;
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
-      !ShiftLHS->hasOneUse())
-    return false;
+  SDValue Add;
 
-  return true;
+  if (ShiftLHS->hasOneUse() ||
+      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
+    return true;
+
+  return false;
 }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a1490e6b6b4e4c..7a36df779b7bf9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60683,8 +60683,9 @@ bool X86TargetLowering::isDesirableToCommuteWithShift(
 
   SDValue ShiftLHS = N->getOperand(0);
   SDValue Add;
-  if (sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))) ||
-      ShiftLHS->hasOneUse())
+
+  if (ShiftLHS->hasOneUse() ||
+      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
     return true;
 
   return false;

>From fece61fac232b30ccc41e57c5242e10d32c2853b Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Tue, 5 Nov 2024 12:27:50 +0800
Subject: [PATCH 4/9] fix some comments

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp  |  2 --
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  | 13 +++++++------
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp      | 12 ++++++------
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp      |  5 ++---
 llvm/lib/Target/X86/X86ISelLowering.cpp          | 12 ++++++------
 llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll |  3 +--
 llvm/test/CodeGen/RISCV/add_shl_constant.ll      |  3 +--
 7 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cc7628acba8d2a..e420d921d8131f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17930,7 +17930,6 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
 
   SDValue ShiftLHS = N->getOperand(0);
   EVT VT = N->getValueType(0);
-  SDValue Add;
 
   if (!ShiftLHS->hasOneUse())
     return false;
@@ -17957,7 +17956,6 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
       }
     }
   }
-
   return true;
 }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 8db875b4adc252..8650b7ea25aecb 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2154,19 +2154,20 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
 
 bool HexagonTargetLowering::isDesirableToCommuteWithShift(
     const SDNode *N, CombineLevel Level) const {
-  using namespace llvm::SDPatternMatch;
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
-  SDValue Add;
 
-  if (ShiftLHS->hasOneUse() ||
-      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
-    return true;
+  if (!ShiftLHS->hasOneUse())
+    return false;
 
-  return false;
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !ShiftLHS.getOperand(0)->hasOneUse()))
+    return false;
+
+  return true;
 }
 
 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index cc6a02553bc885..3008ccc896766e 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19169,17 +19169,17 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
 
 bool PPCTargetLowering::isDesirableToCommuteWithShift(
     const SDNode *N, CombineLevel Level) const {
-  using namespace llvm::SDPatternMatch;
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
-  SDValue Add;
+  if (!ShiftLHS->hasOneUse())
+    return false;
 
-  if (ShiftLHS->hasOneUse() ||
-      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
-    return true;
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !ShiftLHS.getOperand(0)->hasOneUse()))
+    return false;
 
-  return false;
+  return true;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e897c43697b586..870eee26616b21 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18226,13 +18226,12 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
     }
   }
 
-  if ((N0->getOpcode() == ISD::ADD || N0->getOpcode() == ISD::OR) &&
-      !N0->hasOneUse())
+  if (!N0->hasOneUse())
     return false;
 
   if (N0->getOpcode() == ISD::SIGN_EXTEND &&
       N0->getOperand(0)->getOpcode() == ISD::ADD &&
-      !(N0->hasOneUse() && N0->getOperand(0)->hasOneUse()))
+      !N0->getOperand(0)->hasOneUse())
     return isLDST();
 
   return true;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7a36df779b7bf9..e4145b32d50258 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60676,17 +60676,17 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
 
 bool X86TargetLowering::isDesirableToCommuteWithShift(
     const SDNode *N, CombineLevel Level) const {
-  using namespace llvm::SDPatternMatch;
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
   SDValue ShiftLHS = N->getOperand(0);
-  SDValue Add;
+  if (!ShiftLHS->hasOneUse())
+    return false;
 
-  if (ShiftLHS->hasOneUse() ||
-      sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
-    return true;
+  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+       !ShiftLHS.getOperand(0)->hasOneUse()))
+    return false;
 
-  return false;
+  return true;
 }
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 891230c10cc460..2c933e68e8307b 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64 %s
+; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64 %s
 
 define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
 ; RV64-LABEL: add_sext_shl_moreOneUse_add:
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index d60fe73fb20bff..6a10147ff54b91 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -1,6 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=RV32 %s
 
 define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
 ; RV32-LABEL: add_shl_oneUse:

>From ce14489adc07ed2e17cec579bdcd7f970b0ec8ae Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Mon, 11 Nov 2024 10:19:36 +0800
Subject: [PATCH 5/9] fix the comments

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp   | 4 ++--
 llvm/lib/Target/ARM/ARMISelLowering.cpp         | 4 ++--
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 4 ++--
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp     | 4 ++--
 llvm/lib/Target/X86/X86ISelLowering.cpp         | 4 ++--
 6 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e420d921d8131f..4c42ed2301dabe 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17934,8 +17934,8 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 782d6c656e5e1e..48e9af9fe507fb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1077,8 +1077,8 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   // Always commute pre-type legalization and right shifts.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a5e993319f9382..50a677caea1db4 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13884,8 +13884,8 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   if (Level == BeforeLegalizeTypes)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 8650b7ea25aecb..d0b767aa3b7db6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2163,8 +2163,8 @@ bool HexagonTargetLowering::isDesirableToCommuteWithShift(
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   return true;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 3008ccc896766e..0deb5b2f3965a2 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19177,8 +19177,8 @@ bool PPCTargetLowering::isDesirableToCommuteWithShift(
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   return true;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e4145b32d50258..215bdd3aedce47 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60684,8 +60684,8 @@ bool X86TargetLowering::isDesirableToCommuteWithShift(
   if (!ShiftLHS->hasOneUse())
     return false;
 
-  if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-       !ShiftLHS.getOperand(0)->hasOneUse()))
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
     return false;
 
   return true;

>From c06290f6dc4639aade7f67a19088584f0f357364 Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Thu, 14 Nov 2024 09:51:47 +0800
Subject: [PATCH 6/9] fix the comments

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 870eee26616b21..0470230be8f843 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18156,17 +18156,17 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   // if the add is only used by memory operations, the folding above can
   // still be performed.
   auto isLDST = [&]() {
-    bool canOptAwlays = false;
+    bool CanOptAlways = false;
     if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
       for (SDNode *Use : N0->uses()) {
         // This use is the one we're on right now. Skip it
         if (Use == N || Use->getOpcode() == ISD::SELECT)
           continue;
         if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
-          canOptAwlays = false;
+          CanOptAlways = false;
           break;
         }
-        canOptAwlays = true;
+        CanOptAlways = true;
       }
     }
 
@@ -18177,20 +18177,20 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
         if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
           continue;
         if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
-          canOptAwlays = false;
+          CanOptAlways = false;
           break;
         }
-        canOptAwlays = true;
+        CanOptAlways = true;
       }
     }
-    return canOptAwlays;
+    return CanOptAlways;
   };
 
   if (Ty.isScalarInteger() &&
       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
-    if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+    if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse())
       return isLDST();
-    }
+
     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
     if (C1 && C2) {

>From d813414478213d634d95c62eda4bcf1758221de6 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sun, 24 Nov 2024 21:13:24 +0800
Subject: [PATCH 7/9] Move restriction to the default implementation of
 isDesirableToCommuteWithShift for targets

---
 llvm/include/llvm/CodeGen/TargetLowering.h     |  6 ++++++
 .../lib/Target/Hexagon/HexagonISelLowering.cpp | 18 ------------------
 llvm/lib/Target/Hexagon/HexagonISelLowering.h  |  3 ---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp    | 17 -----------------
 llvm/lib/Target/PowerPC/PPCISelLowering.h      |  3 ---
 .../CodeGen/RISCV/add_sext_shl_constant.ll     | 18 +++++++++---------
 llvm/test/CodeGen/RISCV/add_shl_constant.ll    |  2 +-
 7 files changed, 16 insertions(+), 51 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 6a41094ff933b0..bb0c4b11cf3f5e 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4305,6 +4305,12 @@ class TargetLowering : public TargetLoweringBase {
   /// @param Level the current DAGCombine legalization level.
   virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                              CombineLevel Level) const {
+    SDValue ShiftLHS = N->getOperand(0);
+    if (!ShiftLHS->hasOneUse())
+      return false;
+    if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+        !ShiftLHS.getOperand(0)->hasOneUse())
+      return false;
     return true;
   }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index d0b767aa3b7db6..2c8d141aa21080 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2152,24 +2152,6 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
   return X.getValueType().isScalarInteger(); // 'tstbit'
 }
 
-bool HexagonTargetLowering::isDesirableToCommuteWithShift(
-    const SDNode *N, CombineLevel Level) const {
-  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
-          N->getOpcode() == ISD::SRL) &&
-         "Expected shift op");
-
-  SDValue ShiftLHS = N->getOperand(0);
-
-  if (!ShiftLHS->hasOneUse())
-    return false;
-
-  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-      !ShiftLHS.getOperand(0)->hasOneUse())
-    return false;
-
-  return true;
-}
-
 bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
 }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index a6bd57630031c4..3fd961f5a74623 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -155,9 +155,6 @@ class HexagonTargetLowering : public TargetLowering {
 
   bool hasBitTest(SDValue X, SDValue Y) const override;
 
-  bool isDesirableToCommuteWithShift(const SDNode *N,
-                                     CombineLevel Level) const override;
-
   bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
 
   /// Return true if an FMA operation is faster than a pair of mul and add
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 0deb5b2f3965a2..93a48ce2b8c72d 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19166,20 +19166,3 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
   return Builder.CreateOr(
       Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
 }
-
-bool PPCTargetLowering::isDesirableToCommuteWithShift(
-    const SDNode *N, CombineLevel Level) const {
-  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
-          N->getOpcode() == ISD::SRL) &&
-         "Expected shift op");
-
-  SDValue ShiftLHS = N->getOperand(0);
-  if (!ShiftLHS->hasOneUse())
-    return false;
-
-  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-      !ShiftLHS.getOperand(0)->hasOneUse())
-    return false;
-
-  return true;
-}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 784ee8f5089e04..1fd4b83d6c1192 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1497,9 +1497,6 @@ namespace llvm {
     /// through to determine the optimal load/store instruction format.
     unsigned computeMOFlags(const SDNode *Parent, SDValue N,
                             SelectionDAG &DAG) const;
-
-    bool isDesirableToCommuteWithShift(const SDNode *N,
-                                       CombineLevel Level) const override;
   }; // end class PPCTargetLowering
 
   namespace PPC {
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 2c933e68e8307b..47b6c07cc699e7 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -32,9 +32,9 @@ define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b
 ; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a3, a1, 2047
-; RV64-NEXT:    addi a3, a3, 1
 ; RV64-NEXT:    lui a4, 2
 ; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    addi a3, a3, 1
 ; RV64-NEXT:    slli a1, a1, 2
 ; RV64-NEXT:    add a0, a0, a4
 ; RV64-NEXT:    add a0, a0, a1
@@ -121,19 +121,19 @@ define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i
 ; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a4, a1, 2047
-; RV64-NEXT:    addi a4, a4, 1
 ; RV64-NEXT:    lui a5, 2
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    slli a6, a1, 2
+; RV64-NEXT:    addi a1, a4, 1
+; RV64-NEXT:    add a0, a0, a6
 ; RV64-NEXT:    add a0, a0, a5
-; RV64-NEXT:    mv a1, a4
+; RV64-NEXT:    mv a4, a1
 ; RV64-NEXT:    bgtz a3, .LBB4_2
 ; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a1, a2
+; RV64-NEXT:    mv a4, a2
 ; RV64-NEXT:  .LBB4_2: # %entry
-; RV64-NEXT:    sw a1, 0(a0)
-; RV64-NEXT:    sw a1, 4(a0)
-; RV64-NEXT:    sw a4, 120(a0)
+; RV64-NEXT:    sw a4, 0(a0)
+; RV64-NEXT:    sw a4, 4(a0)
+; RV64-NEXT:    sw a1, 120(a0)
 ; RV64-NEXT:    ret
 entry:
   %add = add nsw i32 %a, 2048
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index 6a10147ff54b91..71b61868b8c844 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -40,9 +40,9 @@ define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32
 ; RV32-LABEL: add_shl_moreOneUse_inStore_addexceedsign12:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a3, a1, 2047
-; RV32-NEXT:    addi a3, a3, 1
 ; RV32-NEXT:    lui a4, 2
 ; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    addi a3, a3, 1
 ; RV32-NEXT:    add a0, a0, a1
 ; RV32-NEXT:    add a0, a0, a4
 ; RV32-NEXT:    sw a2, 0(a0)

>From 4581afb9950bfae6dfaf0dac7b05f519ca549fa5 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Mon, 2 Dec 2024 19:59:36 +0800
Subject: [PATCH 8/9] Address review comments: rename isLDST to isUsedByLdSt

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0470230be8f843..c3595ef8b534cb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18155,7 +18155,7 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   // LD/ST will optimize constant Offset extraction, so when AddNode is used by
   // LD/ST, it can still complete the folding optimization operation performed
   // above.
-  auto isLDST = [&]() {
+  auto isUsedByLdSt = [&]() {
     bool CanOptAlways = false;
     if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
       for (SDNode *Use : N0->uses()) {
@@ -18189,7 +18189,7 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   if (Ty.isScalarInteger() &&
       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
     if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse())
-      return isLDST();
+      return isUsedByLdSt();
 
     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
@@ -18232,7 +18232,7 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   if (N0->getOpcode() == ISD::SIGN_EXTEND &&
       N0->getOperand(0)->getOpcode() == ISD::ADD &&
       !N0->getOperand(0)->hasOneUse())
-    return isLDST();
+    return isUsedByLdSt();
 
   return true;
 }
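The sign-extend path mirrors the plain add path: when the inner add of (shl (sext (add x, c1)), c2) has extra uses, the commute is allowed only if those uses are loads or stores. A hypothetical minimal sketch, deliberately smaller than the tests added in this patch:

; Hypothetical sext case: the inner add has a second use (it is the
; stored value), so only the isUsedByLdSt escape hatch lets
; (shl (sext (add x, 5)), 2) commute into the address computation.
define void @sext_add_extra_use(ptr %p, i32 %a) {
  %add = add nsw i32 %a, 5
  %idx = sext i32 %add to i64
  %gep = getelementptr inbounds i32, ptr %p, i64 %idx
  store i32 %add, ptr %gep
  ret void
}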

>From 9485b4d0de373b29c6dc5cd09c5fe82d99abeca6 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Tue, 3 Dec 2024 12:52:26 +0800
Subject: [PATCH 9/9] Remove the isDesirableToCommuteWithShift override from X86

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 17 -----------------
 llvm/lib/Target/X86/X86ISelLowering.h   |  3 ---
 2 files changed, 20 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 215bdd3aedce47..d490de06590f78 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60673,20 +60673,3 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
   return TargetLowering::getPrefLoopAlignment();
 }
-
-bool X86TargetLowering::isDesirableToCommuteWithShift(
-    const SDNode *N, CombineLevel Level) const {
-  assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
-          N->getOpcode() == ISD::SRL) &&
-         "Expected shift op");
-
-  SDValue ShiftLHS = N->getOperand(0);
-  if (!ShiftLHS->hasOneUse())
-    return false;
-
-  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
-      !ShiftLHS.getOperand(0)->hasOneUse())
-    return false;
-
-  return true;
-}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 0f61bdabafbd5c..2db25d6dda061a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1901,9 +1901,6 @@ namespace llvm {
 
     SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                     SDValue V2) const;
-
-    bool isDesirableToCommuteWithShift(const SDNode *N,
-                                       CombineLevel Level) const override;
   };
 
   namespace X86 {
