[llvm] [DAGCombine] Remove oneuse restrictions for RISCV in folding (shl (add_nsw x, c1), c2) and folding (shl (sext (add x, c1)), c2) in some scenarios (PR #101294)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Nov 23 19:42:19 PST 2024
https://github.com/LiqinWeng updated https://github.com/llvm/llvm-project/pull/101294
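
For readers skimming the patch: the folds in question rewrite a shift of an add-with-constant into an add of a shift so the constant can later be absorbed into a load/store offset. A minimal IR sketch of the multi-use situation this PR targets (function and value names are made up; it is a simplified form of the tests added in PATCH 1):

  define void @multi_use_add(ptr %base, i32 %i, i32 %v) {
    %add = add nsw i32 %i, 5                              ; %add has two uses
    %idx = sext i32 %add to i64
    %p = getelementptr inbounds i32, ptr %base, i64 %idx
    store i32 %v, ptr %p                                  ; use #1: address computation
    %q = getelementptr inbounds i32, ptr %base, i64 40
    store i32 %add, ptr %q                                ; use #2: the value itself
    ret void
  }

Previously the extra use of %add blocked the (shl (sext (add x, c1)), c2) fold in DAGCombiner, so the scaled address was computed from %add directly; after this change the +5 can be folded into the store's immediate offset on RISC-V, which is what the updated CHECK lines below exercise.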
>From 328e22351c0c570389932aa4d387e1fb1f0141ab Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sat, 3 Aug 2024 19:13:35 +0800
Subject: [PATCH 1/7] [Test] Pre-submit tests for #101294
---
.../CodeGen/RISCV/add_sext_shl_constant.ll | 195 ++++++++++++++++++
llvm/test/CodeGen/RISCV/add_shl_constant.ll | 132 ++++++++++++
.../CodeGen/RISCV/riscv-shifted-extend.ll | 124 -----------
3 files changed, 327 insertions(+), 124 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
create mode 100644 llvm/test/CodeGen/RISCV/add_shl_constant.ll
delete mode 100644 llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
new file mode 100644
index 00000000000000..35f3656e868681
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64 %s
+
+define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a3, a1, 5
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 24(a0)
+; RV64-NEXT: sw a3, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %add3 = add nsw i32 %a, 6
+ %idxprom4 = sext i32 %add3 to i64
+ %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+ store i32 %b, ptr %arrayidx5
+ %add6 = add nsw i32 %a, 35
+ %idxprom7 = sext i32 %add6 to i64
+ %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+ store i32 %add, ptr %arrayidx8
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a3, a1, 2047
+; RV64-NEXT: addiw a3, a3, 1
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: lui a4, 2
+; RV64-NEXT: add a0, a0, a4
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a3, 4(a0)
+; RV64-NEXT: sw a2, 120(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %0 = sext i32 %a to i64
+ %1 = getelementptr i32, ptr %array1, i64 %0
+ %arrayidx3 = getelementptr i8, ptr %1, i64 8196
+ store i32 %add, ptr %arrayidx3
+ %arrayidx6 = getelementptr i8, ptr %1, i64 8312
+ store i32 %b, ptr %arrayidx6
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_sext:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a3, a1, 5
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a4, a0, a4
+; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 24(a0)
+; RV64-NEXT: sd a3, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ store i32 %b, ptr %arrayidx
+ %add3 = add nsw i32 %a, 6
+ %idxprom4 = sext i32 %add3 to i64
+ %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+ store i32 %b, ptr %arrayidx5
+ %add6 = add nsw i32 %a, 35
+ %idxprom7 = sext i32 %add6 to i64
+ %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+ store i64 %idxprom, ptr %arrayidx8
+ ret void
+}
+
+; Test with branching: the add has more than one use but can still be simplified.
+define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addiw a4, a1, 5
+; RV64-NEXT: slli a5, a4, 2
+; RV64-NEXT: add a5, a0, a5
+; RV64-NEXT: mv a6, a4
+; RV64-NEXT: bgtz a3, .LBB3_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a6, a2
+; RV64-NEXT: .LBB3_2: # %entry
+; RV64-NEXT: sw a6, 0(a5)
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a6, 24(a0)
+; RV64-NEXT: sw a4, 140(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %cmp = icmp sgt i32 %x, 0
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ %add.b = select i1 %cmp, i32 %add, i32 %b
+ store i32 %add.b, ptr %arrayidx
+ %add5 = add nsw i32 %a, 6
+ %idxprom6 = sext i32 %add5 to i64
+ %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
+ store i32 %add.b, ptr %arrayidx7
+ %add8 = add nsw i32 %a, 35
+ %idxprom9 = sext i32 %add8 to i64
+ %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
+ store i32 %add, ptr %arrayidx10
+ ret void
+}
+
+define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a4, a1, 2047
+; RV64-NEXT: addiw a4, a4, 1
+; RV64-NEXT: slli a6, a4, 2
+; RV64-NEXT: add a6, a0, a6
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: bgtz a3, .LBB4_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a5, a2
+; RV64-NEXT: .LBB4_2: # %entry
+; RV64-NEXT: sw a5, 0(a6)
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: lui a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a5, 4(a0)
+; RV64-NEXT: sw a4, 120(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %cmp = icmp sgt i32 %x, 0
+ %idxprom = sext i32 %add to i64
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+ %add.b = select i1 %cmp, i32 %add, i32 %b
+ store i32 %add.b, ptr %arrayidx
+ %0 = sext i32 %a to i64
+ %1 = getelementptr i32, ptr %array1, i64 %0
+ %arrayidx7 = getelementptr i8, ptr %1, i64 8196
+ store i32 %add.b, ptr %arrayidx7
+ %arrayidx10 = getelementptr i8, ptr %1, i64 8312
+ store i32 %add, ptr %arrayidx10
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
+; RV64-LABEL: add_shl_moreOneUse_inSelect:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: addi a4, a1, 5
+; RV64-NEXT: mv a5, a4
+; RV64-NEXT: bgtz a3, .LBB5_2
+; RV64-NEXT: # %bb.1: # %entry
+; RV64-NEXT: mv a5, a2
+; RV64-NEXT: .LBB5_2: # %entry
+; RV64-NEXT: slli a2, a4, 3
+; RV64-NEXT: add a2, a0, a2
+; RV64-NEXT: sd a5, 0(a2)
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sd a5, 48(a0)
+; RV64-NEXT: sd a4, 280(a0)
+; RV64-NEXT: ret
+entry:
+ %add = add nsw i64 %a, 5
+ %cmp = icmp sgt i64 %x, 0
+ %spec.select = select i1 %cmp, i64 %add, i64 %b
+ %0 = getelementptr inbounds i64, ptr %array1, i64 %add
+ store i64 %spec.select, ptr %0
+ %add3 = add nsw i64 %a, 6
+ %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
+ store i64 %spec.select, ptr %arrayidx4
+ %add5 = add nsw i64 %a, 35
+ %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
+ store i64 %add, ptr %arrayidx6
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
new file mode 100644
index 00000000000000..5c71a3c5449940
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32 %s
+
+define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
+; RV32-LABEL: add_shl_oneUse:
+; RV32: # %bb.0:
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: addi a0, a0, 984
+; RV32-NEXT: ret
+ %add.0 = add i32 %x, 123
+ %shl = shl i32 %add.0, 3
+ %add.1 = add i32 %shl, %y
+ ret i32 %add.1
+}
+
+define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b) {
+; RV32-LABEL: add_shl_moreOneUse_inStore:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a3, a1, 5
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a2, 24(a0)
+; RV32-NEXT: sw a3, 140(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %b, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx3 = getelementptr i8, ptr %0, i32 24
+ store i32 %b, ptr %arrayidx3
+ %arrayidx5 = getelementptr i8, ptr %0, i32 140
+ store i32 %add, ptr %arrayidx5
+ ret void
+}
+
+define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV32-LABEL: add_shl_moreOneUse_inStore_addexceedsign12:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a3, a1, 2047
+; RV32-NEXT: addi a3, a3, 1
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a4, a0, a4
+; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a3, 4(a0)
+; RV32-NEXT: sw a2, 120(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %b, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx2 = getelementptr i8, ptr %0, i32 8196
+ store i32 %add, ptr %arrayidx2
+ %arrayidx4 = getelementptr i8, ptr %0, i32 8312
+ store i32 %b, ptr %arrayidx4
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a4, a1, 5
+; RV32-NEXT: mv a5, a4
+; RV32-NEXT: bgtz a3, .LBB3_2
+; RV32-NEXT: # %bb.1: # %entry
+; RV32-NEXT: mv a5, a2
+; RV32-NEXT: .LBB3_2: # %entry
+; RV32-NEXT: slli a2, a4, 2
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 24(a0)
+; RV32-NEXT: sw a4, 140(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 5
+ %cmp = icmp sgt i32 %x, 0
+ %cond = select i1 %cmp, i32 %add, i32 %b
+ %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %cond, ptr %arrayidx
+ %0 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx2 = getelementptr i32, ptr %0, i32 6
+ store i32 %cond, ptr %arrayidx2
+ %arrayidx4 = getelementptr i32, ptr %0, i32 35
+ store i32 %add, ptr %arrayidx4
+ ret void
+}
+
+define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect_addexceedsign12:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi a4, a1, 2047
+; RV32-NEXT: addi a4, a4, 1
+; RV32-NEXT: mv a5, a4
+; RV32-NEXT: bgtz a3, .LBB4_2
+; RV32-NEXT: # %bb.1: # %entry
+; RV32-NEXT: mv a5, a2
+; RV32-NEXT: .LBB4_2: # %entry
+; RV32-NEXT: slli a2, a4, 2
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: lui a1, 2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 4(a0)
+; RV32-NEXT: sw a4, 120(a0)
+; RV32-NEXT: ret
+entry:
+ %add = add nsw i32 %a, 2048
+ %cmp = icmp sgt i32 %x, 0
+ %spec.select = select i1 %cmp, i32 %add, i32 %b
+ %0 = getelementptr inbounds i32, ptr %array1, i32 %add
+ store i32 %spec.select, ptr %0, align 4
+ %1 = getelementptr i32, ptr %array1, i32 %a
+ %arrayidx4 = getelementptr i8, ptr %1, i32 8196
+ store i32 %spec.select, ptr %arrayidx4
+ %arrayidx6 = getelementptr i8, ptr %1, i32 8312
+ store i32 %add, ptr %arrayidx6
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll b/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
deleted file mode 100644
index 4901e268ec11a0..00000000000000
--- a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64 %s
-
-define void @test(ptr nocapture noundef writeonly %array1, i32 noundef signext %a, i32 noundef signext %b) {
-; RV64-LABEL: test:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a3, a1, 5
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sw a2, 0(a4)
-; RV64-NEXT: sw a2, 24(a0)
-; RV64-NEXT: sw a3, 140(a0)
-; RV64-NEXT: ret
-entry:
- %add = add nsw i32 %a, 5
- %idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
- store i32 %b, ptr %arrayidx, align 4
- %add3 = add nsw i32 %a, 6
- %idxprom4 = sext i32 %add3 to i64
- %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
- store i32 %b, ptr %arrayidx5, align 4
- %add6 = add nsw i32 %a, 35
- %idxprom7 = sext i32 %add6 to i64
- %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
- store i32 %add, ptr %arrayidx8, align 4
- ret void
-}
-
-; test of jumpping, find add's operand has one more use can simplified
-define void @test1(ptr nocapture noundef %array1, i32 noundef signext %a, i32 noundef signext %b, i32 noundef signext %x) {
-; RV64-LABEL: test1:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a4, a1, 5
-; RV64-NEXT: slli a5, a4, 2
-; RV64-NEXT: add a5, a0, a5
-; RV64-NEXT: mv a6, a4
-; RV64-NEXT: bgtz a3, .LBB1_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a6, a2
-; RV64-NEXT: .LBB1_2: # %entry
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sw a6, 0(a5)
-; RV64-NEXT: sw a6, 24(a0)
-; RV64-NEXT: sw a4, 140(a0)
-; RV64-NEXT: ret
-entry:
- %add = add nsw i32 %a, 5
- %cmp = icmp sgt i32 %x, 0
- %idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
- %add.b = select i1 %cmp, i32 %add, i32 %b
- store i32 %add.b, ptr %arrayidx, align 4
- %add5 = add nsw i32 %a, 6
- %idxprom6 = sext i32 %add5 to i64
- %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
- store i32 %add.b, ptr %arrayidx7, align 4
- %add8 = add nsw i32 %a, 35
- %idxprom9 = sext i32 %add8 to i64
- %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
- store i32 %add, ptr %arrayidx10, align 4
- ret void
-}
-
-define void @test2(ptr nocapture noundef writeonly %array1, i64 noundef %a, i64 noundef %b) local_unnamed_addr #0 {
-; RV64-LABEL: test2:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a3, a1, 5
-; RV64-NEXT: slli a4, a3, 3
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: slli a1, a1, 3
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sd a2, 0(a4)
-; RV64-NEXT: sd a2, 48(a0)
-; RV64-NEXT: sd a3, 280(a0)
-; RV64-NEXT: ret
-entry:
- %add = add nsw i64 %a, 5
- %arrayidx = getelementptr inbounds i64, ptr %array1, i64 %add
- store i64 %b, ptr %arrayidx, align 8
- %add2 = add nsw i64 %a, 6
- %arrayidx3 = getelementptr inbounds i64, ptr %array1, i64 %add2
- store i64 %b, ptr %arrayidx3, align 8
- %add4 = add nsw i64 %a, 35
- %arrayidx5 = getelementptr inbounds i64, ptr %array1, i64 %add4
- store i64 %add, ptr %arrayidx5, align 8
- ret void
-}
-
-define void @test3(ptr nocapture noundef %array1, i64 noundef %a, i64 noundef %b, i64 noundef %x) {
-; RV64-LABEL: test3:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: addi a4, a1, 5
-; RV64-NEXT: mv a5, a4
-; RV64-NEXT: bgtz a3, .LBB3_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a5, a2
-; RV64-NEXT: .LBB3_2: # %entry
-; RV64-NEXT: slli a2, a4, 3
-; RV64-NEXT: add a2, a0, a2
-; RV64-NEXT: slli a1, a1, 3
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sd a5, 0(a2)
-; RV64-NEXT: sd a5, 48(a0)
-; RV64-NEXT: sd a4, 280(a0)
-; RV64-NEXT: ret
-entry:
- %add = add nsw i64 %a, 5
- %cmp = icmp sgt i64 %x, 0
- %spec.select = select i1 %cmp, i64 %add, i64 %b
- %0 = getelementptr inbounds i64, ptr %array1, i64 %add
- store i64 %spec.select, ptr %0, align 8
- %add3 = add nsw i64 %a, 6
- %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
- store i64 %spec.select, ptr %arrayidx4, align 8
- %add5 = add nsw i64 %a, 35
- %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
- store i64 %add, ptr %arrayidx6, align 8
- ret void
-}
>From d8ee1b0c7fcad6ca44cfd25f31d7c7a52a4e55e1 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Fri, 9 Aug 2024 13:08:36 +0800
Subject: [PATCH 2/7] [DAGCombine] Remove OneUse restriction when folding (shl
(add x, c1), c2) and (shl (sext (add x, c1)), c2)
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 5 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 5 ++
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 7 +++
llvm/lib/Target/ARM/ARMISelLowering.cpp | 6 ++
.../Target/Hexagon/HexagonISelLowering.cpp | 15 +++++
llvm/lib/Target/Hexagon/HexagonISelLowering.h | 3 +
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 15 +++++
llvm/lib/Target/PowerPC/PPCISelLowering.h | 3 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 48 ++++++++++++++++
llvm/lib/Target/X86/X86ISelLowering.cpp | 16 ++++++
llvm/lib/Target/X86/X86ISelLowering.h | 3 +
.../CodeGen/RISCV/add_sext_shl_constant.ll | 56 ++++++++-----------
llvm/test/CodeGen/RISCV/add_shl_constant.ll | 24 +++-----
13 files changed, 153 insertions(+), 53 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b800204d917503..2377e1ccf379ab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10192,7 +10192,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// Variant of version done on multiply, except mul by a power of 2 is turned
// into a shift.
if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
- N0->hasOneUse() && TLI.isDesirableToCommuteWithShift(N, Level)) {
+ TLI.isDesirableToCommuteWithShift(N, Level)) {
SDValue N01 = N0.getOperand(1);
if (SDValue Shl1 =
DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
@@ -10211,8 +10211,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// TODO: Should we limit this with isLegalAddImmediate?
if (N0.getOpcode() == ISD::SIGN_EXTEND &&
N0.getOperand(0).getOpcode() == ISD::ADD &&
- N0.getOperand(0)->getFlags().hasNoSignedWrap() && N0->hasOneUse() &&
- N0.getOperand(0)->hasOneUse() &&
+ N0.getOperand(0)->getFlags().hasNoSignedWrap() &&
TLI.isDesirableToCommuteWithShift(N, Level)) {
SDValue Add = N0.getOperand(0);
SDLoc DL(N0);
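
In plain terms, the two combines touched here are (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) and, when the add is nsw, (shl (sext (add x, c1)), c2) -> (add (shl (sext x), c2), (sext c1) << c2). An IR-level sketch of the first form (the combiner itself works on the SelectionDAG; this is just for illustration):

  define i64 @shl_of_add(i64 %x) {
    %a = add i64 %x, 5        ; c1 = 5
    %s = shl i64 %a, 3        ; c2 = 3
    ; after the combine, equivalent to:
    ;   %t = shl i64 %x, 3
    ;   %s = add i64 %t, 40   ; 5 << 3 = 40
    ret i64 %s
  }

Instead of requiring N0 to have a single use here, the decision is now delegated entirely to the target's isDesirableToCommuteWithShift hook, which is why the remaining hunks in this patch add or tighten that hook for AArch64, AMDGPU, ARM, Hexagon, PowerPC, RISC-V and X86.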
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 31a720ed7b5c77..5b1d8209e766c6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17912,6 +17912,11 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
SDValue ShiftLHS = N->getOperand(0);
EVT VT = N->getValueType(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
// If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
// combine it with shift 'N' to let it be lowered to UBFX except:
// ((x >> C) & mask) << C.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e4b54c7d72b083..fc439e0be578a2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1072,6 +1072,13 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
// Always commute pre-type legalization and right shifts.
// We're looking for shl(or(x,y),z) patterns.
if (Level < CombineLevel::AfterLegalizeTypes ||
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a98b7a8420927e..13a4008bf32f23 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13883,6 +13883,12 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
if (Level == BeforeLegalizeTypes)
return true;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index ab9bc559367787..c1f7253353b2a6 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2156,6 +2156,21 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
return X.getValueType().isScalarInteger(); // 'tstbit'
}
+bool HexagonTargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
+ return true;
+}
+
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 3fd961f5a74623..a6bd57630031c4 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -155,6 +155,9 @@ class HexagonTargetLowering : public TargetLowering {
bool hasBitTest(SDValue X, SDValue Y) const override;
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
+
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// Return true if an FMA operation is faster than a pair of mul and add
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index d8f3095ed7fb68..a529701201ccdf 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19137,3 +19137,18 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
+
+bool PPCTargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
+ !ShiftLHS->hasOneUse())
+ return false;
+
+ return true;
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index dde45e4cf6f4ae..1562c0e0a70eeb 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1492,6 +1492,9 @@ namespace llvm {
/// through to determine the optimal load/store instruction format.
unsigned computeMOFlags(const SDNode *Parent, SDValue N,
SelectionDAG &DAG) const;
+
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
}; // end class PPCTargetLowering
namespace PPC {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index af7a39b2580a37..cbd2d33f00c9eb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17983,8 +17983,46 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
// (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
SDValue N0 = N->getOperand(0);
EVT Ty = N0.getValueType();
+
+ // LD/ST will optimize constant Offset extraction, so when AddNode is used by
+ // LD/ST, it can still complete the folding optimization operation performed
+ // above.
+ auto isLDST = [&]() {
+ bool canOptAwlays = false;
+ if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+ for (SDNode *Use : N0->uses()) {
+ // This use is the one we're on right now. Skip it
+ if (Use == N || Use->getOpcode() == ISD::SELECT)
+ continue;
+ if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+ canOptAwlays = false;
+ break;
+ }
+ canOptAwlays = true;
+ }
+ }
+
+ if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+ !N0->getOperand(0)->hasOneUse()) {
+ for (SDNode *Use : N0->getOperand(0)->uses()) {
+ // This use is the one we're on right now. Skip it
+ if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
+ continue;
+ if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+ canOptAwlays = false;
+ break;
+ }
+ canOptAwlays = true;
+ }
+ }
+ return canOptAwlays;
+ };
+
if (Ty.isScalarInteger() &&
(N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
+ if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+ return isLDST();
+ }
auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (C1 && C2) {
@@ -18019,6 +18057,16 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
return false;
}
}
+
+ if ((N0->getOpcode() == ISD::ADD || N0->getOpcode() == ISD::OR) &&
+ !N0->hasOneUse())
+ return false;
+
+ if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+ N0->getOperand(0)->getOpcode() == ISD::ADD &&
+ !(N0->hasOneUse() && N0->getOperand(0)->hasOneUse()))
+ return isLDST();
+
return true;
}
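
The isLDST() helper above encodes the heuristic behind this PR: extra uses of the add (or of the sign-extended add) are acceptable when they are loads or stores, because the left-over constant can be folded into the memory instruction's immediate offset; select users are skipped since an add feeding a select does not prevent that folding. A hedged IR illustration (names invented; compare add_shl_moreOneUse_inStore in the new tests):

  define void @extra_use_is_store(ptr %base, i32 %i, i32 %v) {
    %add = add nsw i32 %i, 5
    %p = getelementptr inbounds i32, ptr %base, i32 %add
    store i32 %v, ptr %p          ; use #1: address of base[%i + 5]
    %q = getelementptr i32, ptr %base, i32 %i
    %r = getelementptr i8, ptr %q, i32 140
    store i32 %add, ptr %r        ; use #2: %add as stored data (a StoreSDNode user)
    ret void
  }

With the restriction removed, the first store is emitted with immediate offset 20 (5 << 2) off base + (%i << 2), while %add itself is still materialized once for the data store — the sw a2, 20(a0) / sw a3, 140(a0) sequence visible in the updated CHECK lines further down.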
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1c790f3813b7a4..d5e82a453803f7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60225,3 +60225,19 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
return TargetLowering::getPrefLoopAlignment();
}
+
+bool X86TargetLowering::isDesirableToCommuteWithShift(
+ const SDNode *N, CombineLevel Level) const {
+ using namespace llvm::SDPatternMatch;
+ assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
+ N->getOpcode() == ISD::SRL) &&
+ "Expected shift op");
+
+ SDValue ShiftLHS = N->getOperand(0);
+ SDValue Add;
+ if (sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))) ||
+ ShiftLHS->hasOneUse())
+ return true;
+
+ return false;
+}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 14ada1721fd40e..ae1a3a10a74b00 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1904,6 +1904,9 @@ namespace llvm {
SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) const;
+
+ bool isDesirableToCommuteWithShift(const SDNode *N,
+ CombineLevel Level) const override;
};
namespace X86 {
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 35f3656e868681..891230c10cc460 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -5,13 +5,11 @@
define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
; RV64-LABEL: add_sext_shl_moreOneUse_add:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a3, a1, 5
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: addi a3, a1, 5
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 20(a0)
; RV64-NEXT: sw a2, 24(a0)
; RV64-NEXT: sw a3, 140(a0)
; RV64-NEXT: ret
@@ -35,15 +33,13 @@ define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b
; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a3, a1, 2047
-; RV64-NEXT: addiw a3, a3, 1
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: sw a2, 0(a4)
+; RV64-NEXT: addi a3, a3, 1
+; RV64-NEXT: lui a4, 2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: lui a4, 2
; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: sw a2, 0(a0)
; RV64-NEXT: sw a3, 4(a0)
; RV64-NEXT: sw a2, 120(a0)
; RV64-NEXT: ret
@@ -64,13 +60,11 @@ entry:
define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
; RV64-LABEL: add_sext_shl_moreOneUse_sext:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a3, a1, 5
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a4, a0, a4
-; RV64-NEXT: sw a2, 0(a4)
; RV64-NEXT: sext.w a1, a1
+; RV64-NEXT: addi a3, a1, 5
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sw a2, 20(a0)
; RV64-NEXT: sw a2, 24(a0)
; RV64-NEXT: sd a3, 140(a0)
; RV64-NEXT: ret
@@ -94,18 +88,16 @@ entry:
define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext %a, i32 %b, i32 signext %x) {
; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: addiw a4, a1, 5
-; RV64-NEXT: slli a5, a4, 2
-; RV64-NEXT: add a5, a0, a5
-; RV64-NEXT: mv a6, a4
+; RV64-NEXT: addi a4, a1, 5
+; RV64-NEXT: mv a5, a4
; RV64-NEXT: bgtz a3, .LBB3_2
; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a6, a2
+; RV64-NEXT: mv a5, a2
; RV64-NEXT: .LBB3_2: # %entry
-; RV64-NEXT: sw a6, 0(a5)
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: sw a6, 24(a0)
+; RV64-NEXT: sw a5, 20(a0)
+; RV64-NEXT: sw a5, 24(a0)
; RV64-NEXT: sw a4, 140(a0)
; RV64-NEXT: ret
entry:
@@ -130,20 +122,18 @@ define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i
; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi a4, a1, 2047
-; RV64-NEXT: addiw a4, a4, 1
-; RV64-NEXT: slli a6, a4, 2
-; RV64-NEXT: add a6, a0, a6
-; RV64-NEXT: mv a5, a4
+; RV64-NEXT: addi a4, a4, 1
+; RV64-NEXT: lui a5, 2
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, a0, a5
+; RV64-NEXT: mv a1, a4
; RV64-NEXT: bgtz a3, .LBB4_2
; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: mv a5, a2
+; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB4_2: # %entry
-; RV64-NEXT: sw a5, 0(a6)
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: lui a1, 2
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: sw a5, 4(a0)
+; RV64-NEXT: sw a1, 0(a0)
+; RV64-NEXT: sw a1, 4(a0)
; RV64-NEXT: sw a4, 120(a0)
; RV64-NEXT: ret
entry:
@@ -171,11 +161,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a5, a2
; RV64-NEXT: .LBB5_2: # %entry
-; RV64-NEXT: slli a2, a4, 3
-; RV64-NEXT: add a2, a0, a2
-; RV64-NEXT: sd a5, 0(a2)
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: sd a5, 40(a0)
; RV64-NEXT: sd a5, 48(a0)
; RV64-NEXT: sd a4, 280(a0)
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index 5c71a3c5449940..d60fe73fb20bff 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -19,11 +19,9 @@ define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b) {
; RV32-LABEL: add_shl_moreOneUse_inStore:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a1, 5
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sw a2, 0(a4)
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a2, 20(a0)
; RV32-NEXT: sw a2, 24(a0)
; RV32-NEXT: sw a3, 140(a0)
; RV32-NEXT: ret
@@ -44,13 +42,11 @@ define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi a3, a1, 2047
; RV32-NEXT: addi a3, a3, 1
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sw a2, 0(a4)
+; RV32-NEXT: lui a4, 2
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: lui a1, 2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a4
+; RV32-NEXT: sw a2, 0(a0)
; RV32-NEXT: sw a3, 4(a0)
; RV32-NEXT: sw a2, 120(a0)
; RV32-NEXT: ret
@@ -75,11 +71,9 @@ define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a5, a2
; RV32-NEXT: .LBB3_2: # %entry
-; RV32-NEXT: slli a2, a4, 2
-; RV32-NEXT: add a2, a0, a2
-; RV32-NEXT: sw a5, 0(a2)
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: sw a5, 20(a0)
; RV32-NEXT: sw a5, 24(a0)
; RV32-NEXT: sw a4, 140(a0)
; RV32-NEXT: ret
@@ -107,13 +101,11 @@ define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a5, a2
; RV32-NEXT: .LBB4_2: # %entry
-; RV32-NEXT: slli a2, a4, 2
-; RV32-NEXT: add a2, a0, a2
-; RV32-NEXT: sw a5, 0(a2)
+; RV32-NEXT: lui a2, 2
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: lui a1, 2
-; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: sw a5, 0(a0)
; RV32-NEXT: sw a5, 4(a0)
; RV32-NEXT: sw a4, 120(a0)
; RV32-NEXT: ret
>From bccad663018af34406e5ab5754bcb00efcdc42e9 Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Fri, 1 Nov 2024 10:41:19 +0800
Subject: [PATCH 3/7] fix the comments
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++++--
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 6 ++++--
llvm/lib/Target/ARM/ARMISelLowering.cpp | 6 ++++--
llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 13 ++++++++-----
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 13 ++++++++-----
llvm/lib/Target/X86/X86ISelLowering.cpp | 5 +++--
6 files changed, 33 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5b1d8209e766c6..19e9349a97b85b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17911,10 +17911,13 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
SDValue ShiftLHS = N->getOperand(0);
EVT VT = N->getValueType(0);
+ SDValue Add;
+
+ if (!ShiftLHS->hasOneUse())
+ return false;
if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
- !ShiftLHS->hasOneUse())
+ !ShiftLHS.getOperand(0)->hasOneUse()))
return false;
// If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
@@ -17935,6 +17938,7 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
}
}
}
+
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index fc439e0be578a2..1cb344355d3229 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1074,9 +1074,11 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
+ if (!ShiftLHS->hasOneUse())
+ return false;
+
if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
- !ShiftLHS->hasOneUse())
+ !ShiftLHS.getOperand(0)->hasOneUse()))
return false;
// Always commute pre-type legalization and right shifts.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 13a4008bf32f23..a4634002d5b733 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13884,9 +13884,11 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
+ if (!ShiftLHS->hasOneUse())
+ return false;
+
if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
- !ShiftLHS->hasOneUse())
+ !ShiftLHS.getOperand(0)->hasOneUse()))
return false;
if (Level == BeforeLegalizeTypes)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index c1f7253353b2a6..21b5d5348f392a 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
+#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -2158,17 +2159,19 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
bool HexagonTargetLowering::isDesirableToCommuteWithShift(
const SDNode *N, CombineLevel Level) const {
+ using namespace llvm::SDPatternMatch;
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
- !ShiftLHS->hasOneUse())
- return false;
+ SDValue Add;
- return true;
+ if (ShiftLHS->hasOneUse() ||
+ sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
+ return true;
+
+ return false;
}
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index a529701201ccdf..553a7a7fbf43ec 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -49,6 +49,7 @@
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
+#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
@@ -19140,15 +19141,17 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
bool PPCTargetLowering::isDesirableToCommuteWithShift(
const SDNode *N, CombineLevel Level) const {
+ using namespace llvm::SDPatternMatch;
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !(ShiftLHS->hasOneUse() && ShiftLHS.getOperand(0)->hasOneUse())) ||
- !ShiftLHS->hasOneUse())
- return false;
+ SDValue Add;
- return true;
+ if (ShiftLHS->hasOneUse() ||
+ sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
+ return true;
+
+ return false;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d5e82a453803f7..3144c99bf4de17 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60235,8 +60235,9 @@ bool X86TargetLowering::isDesirableToCommuteWithShift(
SDValue ShiftLHS = N->getOperand(0);
SDValue Add;
- if (sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))) ||
- ShiftLHS->hasOneUse())
+
+ if (ShiftLHS->hasOneUse() ||
+ sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
return true;
return false;
>From 04a843e531b55d66953cd640c7ae71b7a8a1294e Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Tue, 5 Nov 2024 12:27:50 +0800
Subject: [PATCH 4/7] fix some comments
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 --
llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 14 +++++++-------
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 13 ++++++-------
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 ++---
llvm/lib/Target/X86/X86ISelLowering.cpp | 12 ++++++------
llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll | 3 +--
llvm/test/CodeGen/RISCV/add_shl_constant.ll | 3 +--
7 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c22893702d5c25..e4e92300f86651 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17904,7 +17904,6 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
SDValue ShiftLHS = N->getOperand(0);
EVT VT = N->getValueType(0);
- SDValue Add;
if (!ShiftLHS->hasOneUse())
return false;
@@ -17931,7 +17930,6 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
}
}
}
-
return true;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 21b5d5348f392a..87be876bb21fce 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
-#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -2159,19 +2158,20 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
bool HexagonTargetLowering::isDesirableToCommuteWithShift(
const SDNode *N, CombineLevel Level) const {
- using namespace llvm::SDPatternMatch;
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
- SDValue Add;
- if (ShiftLHS->hasOneUse() ||
- sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
- return true;
+ if (!ShiftLHS->hasOneUse())
+ return false;
- return false;
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse()))
+ return false;
+
+ return true;
}
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 553a7a7fbf43ec..740fa30425cea1 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -49,7 +49,6 @@
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
-#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
@@ -19141,17 +19140,17 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
bool PPCTargetLowering::isDesirableToCommuteWithShift(
const SDNode *N, CombineLevel Level) const {
- using namespace llvm::SDPatternMatch;
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
- SDValue Add;
+ if (!ShiftLHS->hasOneUse())
+ return false;
- if (ShiftLHS->hasOneUse() ||
- sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
- return true;
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse()))
+ return false;
- return false;
+ return true;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 24c94de396ad8f..e736743cc954aa 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18099,13 +18099,12 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
}
}
- if ((N0->getOpcode() == ISD::ADD || N0->getOpcode() == ISD::OR) &&
- !N0->hasOneUse())
+ if (!N0->hasOneUse())
return false;
if (N0->getOpcode() == ISD::SIGN_EXTEND &&
N0->getOperand(0)->getOpcode() == ISD::ADD &&
- !(N0->hasOneUse() && N0->getOperand(0)->hasOneUse()))
+ !N0->getOperand(0)->hasOneUse())
return isLDST();
return true;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ced4ad052ca15d..8196141a405aea 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60247,17 +60247,17 @@ Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
bool X86TargetLowering::isDesirableToCommuteWithShift(
const SDNode *N, CombineLevel Level) const {
- using namespace llvm::SDPatternMatch;
assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
N->getOpcode() == ISD::SRL) &&
"Expected shift op");
SDValue ShiftLHS = N->getOperand(0);
- SDValue Add;
+ if (!ShiftLHS->hasOneUse())
+ return false;
- if (ShiftLHS->hasOneUse() ||
- sd_match(ShiftLHS, m_OneUse(m_SExt(m_OneUse(m_Value(Add))))))
- return true;
+ if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse()))
+ return false;
- return false;
+ return true;
}
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index 891230c10cc460..2c933e68e8307b 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV64 %s
+; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64 %s
define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
; RV64-LABEL: add_sext_shl_moreOneUse_add:
diff --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
index d60fe73fb20bff..6a10147ff54b91 100644
--- a/llvm/test/CodeGen/RISCV/add_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv32 < %s | FileCheck -check-prefix=RV32 %s
define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
; RV32-LABEL: add_shl_oneUse:
>From f2cbb92bc83700fb8972b9a04f73e2ba90ba48b4 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Mon, 11 Nov 2024 10:19:36 +0800
Subject: [PATCH 5/7] fix the comments
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 4 ++--
llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 4 ++--
llvm/lib/Target/ARM/ARMISelLowering.cpp | 4 ++--
llvm/lib/Target/Hexagon/HexagonISelLowering.cpp | 4 ++--
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 4 ++--
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 ++--
6 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index cd40ccbd966e37..c3282b1875af0a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17913,8 +17913,8 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
// If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 685f0c1be9b2c9..c5d473ac2b42e7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1078,8 +1078,8 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
// Always commute pre-type legalization and right shifts.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index c8c4e6975b1d01..e7c53727923a4b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13889,8 +13889,8 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
if (Level == BeforeLegalizeTypes)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 87be876bb21fce..91098511c18452 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2167,8 +2167,8 @@ bool HexagonTargetLowering::isDesirableToCommuteWithShift(
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
return true;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2886a0a9a34e50..5c3db6f5a7fa65 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19115,8 +19115,8 @@ bool PPCTargetLowering::isDesirableToCommuteWithShift(
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
return true;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8b84d3e1681910..5b279e4e831336 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -60479,8 +60479,8 @@ bool X86TargetLowering::isDesirableToCommuteWithShift(
if (!ShiftLHS->hasOneUse())
return false;
- if ((ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse()))
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
return false;
return true;
>From 76a397f2efd5a8b149a6c27dbb6a4cc9fea371f5 Mon Sep 17 00:00:00 2001
From: "Liqin.Weng" <liqin.weng at spacemit.com>
Date: Thu, 14 Nov 2024 09:51:47 +0800
Subject: [PATCH 6/7] fix the comments
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 60f69508e4cb13..dbf2b1fcbe2c8b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18046,17 +18046,17 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
// LD/ST, it can still complete the folding optimization operation performed
// above.
auto isLDST = [&]() {
- bool canOptAwlays = false;
+ bool CanOptAlways = false;
if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
for (SDNode *Use : N0->uses()) {
// This use is the one we're on right now. Skip it
if (Use == N || Use->getOpcode() == ISD::SELECT)
continue;
if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
- canOptAwlays = false;
+ CanOptAlways = false;
break;
}
- canOptAwlays = true;
+ CanOptAlways = true;
}
}
@@ -18067,20 +18067,20 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
continue;
if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
- canOptAwlays = false;
+ CanOptAlways = false;
break;
}
- canOptAwlays = true;
+ CanOptAlways = true;
}
}
- return canOptAwlays;
+ return CanOptAlways;
};
if (Ty.isScalarInteger() &&
(N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
- if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+ if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse())
return isLDST();
- }
+
auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (C1 && C2) {
>From eb29b6dd98486e233f9871dd09cffa54961a89d1 Mon Sep 17 00:00:00 2001
From: LiqinWeng <liqin.weng at spacemit.com>
Date: Sun, 24 Nov 2024 11:36:17 +0800
Subject: [PATCH 7/7] fix the comments
---
llvm/include/llvm/CodeGen/TargetLowering.h | 6 +
.../Target/Hexagon/HexagonISelLowering.cpp | 18 --
llvm/lib/Target/Hexagon/HexagonISelLowering.h | 3 -
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 17 --
llvm/lib/Target/PowerPC/PPCISelLowering.h | 3 -
llvm/test/CodeGen/AArch64/tbl-loops.ll | 163 ++----------------
6 files changed, 19 insertions(+), 191 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index e0b638201a0474..f52b4b53124b02 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4297,6 +4297,12 @@ class TargetLowering : public TargetLoweringBase {
/// @param Level the current DAGCombine legalization level.
virtual bool isDesirableToCommuteWithShift(const SDNode *N,
CombineLevel Level) const {
+ SDValue ShiftLHS = N->getOperand(0);
+ if (!ShiftLHS->hasOneUse())
+ return false;
+ if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+ !ShiftLHS.getOperand(0)->hasOneUse())
+ return false;
return true;
}
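
Moving the check into the default implementation preserves the old conservative behaviour for every target that does not override the hook: the generic shl combine still only fires when the shift's LHS (and, for the sign-extend form, the inner add) has a single use, which is what lets the identical Hexagon and PowerPC overrides be deleted again below. RISC-V keeps its own override so that memory-only extra uses remain allowed. A small IR sketch of a case that both the default hook and the RISC-V override reject (hypothetical names), because the extra use of the add is ordinary arithmetic rather than a load or store:

  define i32 @extra_use_is_arith(ptr %base, i32 %i, i32 %v) {
    %add = add nsw i32 %i, 5
    %p = getelementptr inbounds i32, ptr %base, i32 %add
    store i32 %v, ptr %p
    %prod = mul i32 %add, %v    ; extra use is arithmetic, not a memory access
    ret i32 %prod
  }

Here isDesirableToCommuteWithShift returns false, so DAGCombiner leaves the shl(add) alone and %add can be computed once and reused for both the address and the multiply.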
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 91098511c18452..ab9bc559367787 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2156,24 +2156,6 @@ bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
return X.getValueType().isScalarInteger(); // 'tstbit'
}
-bool HexagonTargetLowering::isDesirableToCommuteWithShift(
- const SDNode *N, CombineLevel Level) const {
- assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
- N->getOpcode() == ISD::SRL) &&
- "Expected shift op");
-
- SDValue ShiftLHS = N->getOperand(0);
-
- if (!ShiftLHS->hasOneUse())
- return false;
-
- if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse())
- return false;
-
- return true;
-}
-
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
return isTruncateFree(EVT::getEVT(Ty1), EVT::getEVT(Ty2));
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index a6bd57630031c4..3fd961f5a74623 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -155,9 +155,6 @@ class HexagonTargetLowering : public TargetLowering {
bool hasBitTest(SDValue X, SDValue Y) const override;
- bool isDesirableToCommuteWithShift(const SDNode *N,
- CombineLevel Level) const override;
-
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// Return true if an FMA operation is faster than a pair of mul and add
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 5c3db6f5a7fa65..ec4f8f4be425ed 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -19104,20 +19104,3 @@ Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
return Builder.CreateOr(
Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
-
-bool PPCTargetLowering::isDesirableToCommuteWithShift(
- const SDNode *N, CombineLevel Level) const {
- assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
- N->getOpcode() == ISD::SRL) &&
- "Expected shift op");
-
- SDValue ShiftLHS = N->getOperand(0);
- if (!ShiftLHS->hasOneUse())
- return false;
-
- if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
- !ShiftLHS.getOperand(0)->hasOneUse())
- return false;
-
- return true;
-}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 1d36a68960bca4..0adbad86845973 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1491,9 +1491,6 @@ namespace llvm {
/// through to determine the optimal load/store instruction format.
unsigned computeMOFlags(const SDNode *Parent, SDValue N,
SelectionDAG &DAG) const;
-
- bool isDesirableToCommuteWithShift(const SDNode *N,
- CombineLevel Level) const override;
}; // end class PPCTargetLowering
namespace PPC {
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 0ad99008655184..4bbadd0be3d543 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -1,145 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s
-define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
-; CHECK-LABEL: loop1:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: subs w8, w2, #1
-; CHECK-NEXT: b.lt .LBB0_8
-; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: cmp w8, #6
-; CHECK-NEXT: b.hi .LBB0_3
-; CHECK-NEXT: // %bb.2:
-; CHECK-NEXT: mov w10, wzr
-; CHECK-NEXT: mov x8, x1
-; CHECK-NEXT: mov x9, x0
-; CHECK-NEXT: b .LBB0_6
-; CHECK-NEXT: .LBB0_3: // %vector.ph
-; CHECK-NEXT: add x11, x8, #1
-; CHECK-NEXT: mov w8, #1132396544 // =0x437f0000
-; CHECK-NEXT: add x12, x0, #4
-; CHECK-NEXT: and x10, x11, #0x1fffffff8
-; CHECK-NEXT: dup v0.4s, w8
-; CHECK-NEXT: add x13, x1, #16
-; CHECK-NEXT: add x8, x1, x10, lsl #2
-; CHECK-NEXT: add x9, x0, x10
-; CHECK-NEXT: mov x14, x10
-; CHECK-NEXT: .LBB0_4: // %vector.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldp q1, q2, [x13, #-16]
-; CHECK-NEXT: subs x14, x14, #8
-; CHECK-NEXT: add x13, x13, #32
-; CHECK-NEXT: fcmgt v3.4s, v1.4s, v0.4s
-; CHECK-NEXT: fcmgt v4.4s, v2.4s, v0.4s
-; CHECK-NEXT: fcmlt v5.4s, v1.4s, #0.0
-; CHECK-NEXT: fcmlt v6.4s, v2.4s, #0.0
-; CHECK-NEXT: bit v1.16b, v0.16b, v3.16b
-; CHECK-NEXT: bit v2.16b, v0.16b, v4.16b
-; CHECK-NEXT: bic v1.16b, v1.16b, v5.16b
-; CHECK-NEXT: bic v2.16b, v2.16b, v6.16b
-; CHECK-NEXT: fcvtzs v1.4s, v1.4s
-; CHECK-NEXT: fcvtzs v2.4s, v2.4s
-; CHECK-NEXT: xtn v1.4h, v1.4s
-; CHECK-NEXT: xtn v2.4h, v2.4s
-; CHECK-NEXT: uzp1 v1.8b, v1.8b, v0.8b
-; CHECK-NEXT: uzp1 v2.8b, v2.8b, v0.8b
-; CHECK-NEXT: mov v1.s[1], v2.s[0]
-; CHECK-NEXT: stur d1, [x12, #-4]
-; CHECK-NEXT: add x12, x12, #8
-; CHECK-NEXT: b.ne .LBB0_4
-; CHECK-NEXT: // %bb.5: // %middle.block
-; CHECK-NEXT: cmp x11, x10
-; CHECK-NEXT: b.eq .LBB0_8
-; CHECK-NEXT: .LBB0_6: // %for.body.preheader1
-; CHECK-NEXT: movi d0, #0000000000000000
-; CHECK-NEXT: mov w11, #1132396544 // =0x437f0000
-; CHECK-NEXT: sub w10, w2, w10
-; CHECK-NEXT: fmov s1, w11
-; CHECK-NEXT: .LBB0_7: // %for.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr s2, [x8], #4
-; CHECK-NEXT: fcmp s2, s1
-; CHECK-NEXT: fcsel s3, s1, s2, gt
-; CHECK-NEXT: fcmp s2, #0.0
-; CHECK-NEXT: fcsel s2, s0, s3, mi
-; CHECK-NEXT: subs w10, w10, #1
-; CHECK-NEXT: fcvtzs w11, s2
-; CHECK-NEXT: strb w11, [x9], #1
-; CHECK-NEXT: b.ne .LBB0_7
-; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup
-; CHECK-NEXT: ret
-entry:
- %cmp9 = icmp sgt i32 %width, 0
- br i1 %cmp9, label %for.body.preheader, label %for.cond.cleanup
-for.body.preheader: ; preds = %entry
- %0 = add i32 %width, -1
- %1 = zext i32 %0 to i64
- %2 = add nuw nsw i64 %1, 1
- %min.iters.check = icmp ult i32 %0, 7
- br i1 %min.iters.check, label %for.body.preheader21, label %vector.ph
-
-vector.ph: ; preds = %for.body.preheader
- %n.vec = and i64 %2, 8589934584
- %ind.end = trunc i64 %n.vec to i32
- %ind.end14 = getelementptr float, ptr %data, i64 %n.vec
- %ind.end16 = getelementptr i8, ptr %dst, i64 %n.vec
- br label %vector.body
-
-vector.body: ; preds = %vector.body, %vector.ph
- %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %next.gep = getelementptr float, ptr %data, i64 %index
- %next.gep18 = getelementptr i8, ptr %dst, i64 %index
- %wide.load = load <4 x float>, ptr %next.gep, align 4
- %3 = getelementptr float, ptr %next.gep, i64 4
- %wide.load20 = load <4 x float>, ptr %3, align 4
- %4 = fcmp olt <4 x float> %wide.load, zeroinitializer
- %5 = fcmp olt <4 x float> %wide.load20, zeroinitializer
- %6 = fcmp ogt <4 x float> %wide.load, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
- %7 = fcmp ogt <4 x float> %wide.load20, <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>
- %8 = select <4 x i1> %6, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load
- %9 = select <4 x i1> %7, <4 x float> <float 2.550000e+02, float 2.550000e+02, float 2.550000e+02, float 2.550000e+02>, <4 x float> %wide.load20
- %10 = select <4 x i1> %4, <4 x float> zeroinitializer, <4 x float> %8
- %11 = select <4 x i1> %5, <4 x float> zeroinitializer, <4 x float> %9
- %12 = fptoui <4 x float> %10 to <4 x i8>
- %13 = fptoui <4 x float> %11 to <4 x i8>
- store <4 x i8> %12, ptr %next.gep18, align 1
- %14 = getelementptr i8, ptr %next.gep18, i64 4
- store <4 x i8> %13, ptr %14, align 1
- %index.next = add nuw i64 %index, 8
- %15 = icmp eq i64 %index.next, %n.vec
- br i1 %15, label %middle.block, label %vector.body
-
-middle.block: ; preds = %vector.body
- %cmp.n = icmp eq i64 %2, %n.vec
- br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader21
-
-for.body.preheader21: ; preds = %for.body.preheader, %middle.block
- %i.012.ph = phi i32 [ 0, %for.body.preheader ], [ %ind.end, %middle.block ]
- %src.011.ph = phi ptr [ %data, %for.body.preheader ], [ %ind.end14, %middle.block ]
- %dst.addr.010.ph = phi ptr [ %dst, %for.body.preheader ], [ %ind.end16, %middle.block ]
- br label %for.body
-
-for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
- ret void
-
-for.body: ; preds = %for.body.preheader21, %for.body
- %i.012 = phi i32 [ %inc, %for.body ], [ %i.012.ph, %for.body.preheader21 ]
- %src.011 = phi ptr [ %add.ptr, %for.body ], [ %src.011.ph, %for.body.preheader21 ]
- %dst.addr.010 = phi ptr [ %add.ptr2, %for.body ], [ %dst.addr.010.ph, %for.body.preheader21 ]
- %16 = load float, ptr %src.011, align 4
- %cmp.i = fcmp olt float %16, 0.000000e+00
- %cmp1.i = fcmp ogt float %16, 2.550000e+02
- %.x.i = select i1 %cmp1.i, float 2.550000e+02, float %16
- %retval.0.i = select i1 %cmp.i, float 0.000000e+00, float %.x.i
- %conv = fptoui float %retval.0.i to i8
- store i8 %conv, ptr %dst.addr.010, align 1
- %add.ptr = getelementptr inbounds float, ptr %src.011, i64 1
- %add.ptr2 = getelementptr inbounds i8, ptr %dst.addr.010, i64 1
- %inc = add nuw nsw i32 %i.012, 1
- %exitcond.not = icmp eq i32 %inc, %width
- br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
-}
define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture noundef readonly %data, i32 noundef %width) {
; CHECK-LABEL: loop2:
@@ -150,13 +12,13 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: cmp w8, #2
; CHECK-NEXT: b.ls .LBB1_4
; CHECK-NEXT: // %bb.2: // %vector.memcheck
-; CHECK-NEXT: ubfiz x9, x8, #1, #32
-; CHECK-NEXT: add x9, x9, #2
-; CHECK-NEXT: add x10, x1, x9, lsl #2
-; CHECK-NEXT: cmp x10, x0
+; CHECK-NEXT: add x9, x1, w8, uxtw #3
+; CHECK-NEXT: add x9, x9, #8
+; CHECK-NEXT: cmp x9, x0
; CHECK-NEXT: b.ls .LBB1_8
; CHECK-NEXT: // %bb.3: // %vector.memcheck
-; CHECK-NEXT: add x9, x0, x9
+; CHECK-NEXT: add x9, x0, w8, uxtw #1
+; CHECK-NEXT: add x9, x9, #2
; CHECK-NEXT: cmp x9, x1
; CHECK-NEXT: b.ls .LBB1_8
; CHECK-NEXT: .LBB1_4:
@@ -326,9 +188,10 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: b.ls .LBB2_6
; CHECK-NEXT: // %bb.2: // %vector.memcheck
; CHECK-NEXT: add x9, x8, w8, uxtw #1
-; CHECK-NEXT: add x9, x9, #3
; CHECK-NEXT: add x10, x1, x9, lsl #2
-; CHECK-NEXT: add x9, x0, x9
+; CHECK-NEXT: add x9, x9, x0
+; CHECK-NEXT: add x9, x9, #3
+; CHECK-NEXT: add x10, x10, #12
; CHECK-NEXT: cmp x10, x0
; CHECK-NEXT: ccmp x9, x1, #0, hi
; CHECK-NEXT: b.hi .LBB2_6
@@ -534,13 +397,13 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: cmp w8, #2
; CHECK-NEXT: b.ls .LBB3_4
; CHECK-NEXT: // %bb.2: // %vector.memcheck
-; CHECK-NEXT: ubfiz x9, x8, #2, #32
-; CHECK-NEXT: add x9, x9, #4
-; CHECK-NEXT: add x10, x1, x9, lsl #2
-; CHECK-NEXT: cmp x10, x0
+; CHECK-NEXT: add x9, x1, w8, uxtw #4
+; CHECK-NEXT: add x9, x9, #16
+; CHECK-NEXT: cmp x9, x0
; CHECK-NEXT: b.ls .LBB3_8
; CHECK-NEXT: // %bb.3: // %vector.memcheck
-; CHECK-NEXT: add x9, x0, x9
+; CHECK-NEXT: add x9, x0, w8, uxtw #2
+; CHECK-NEXT: add x9, x9, #4
; CHECK-NEXT: cmp x9, x1
; CHECK-NEXT: b.ls .LBB3_8
; CHECK-NEXT: .LBB3_4: