[llvm] 3083acc - [DAGCombine] Remove oneuse restrictions for RISCV in folding (shl (add_nsw x, c1), c2) and folding (shl (sext (add x, c1)), c2) in some scenarios (#101294)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 9 19:17:59 PST 2024


Author: LiqinWeng
Date: 2024-12-10T11:17:54+08:00
New Revision: 3083acc215e9d01c4c41064aa7dd75aeba975e29

URL: https://github.com/llvm/llvm-project/commit/3083acc215e9d01c4c41064aa7dd75aeba975e29
DIFF: https://github.com/llvm/llvm-project/commit/3083acc215e9d01c4c41064aa7dd75aeba975e29.diff

LOG: [DAGCombine] Remove oneuse restrictions for RISCV in folding (shl (add_nsw x, c1), c2) and folding (shl (sext (add x, c1)), c2) in some scenarios (#101294)

This patch removes the one-use restriction for folding (shl (add_nsw x, c1), c2)
and (shl (sext (add x, c1)), c2). The test cases are derived from Dhrystone;
see these links:
riscv32: https://godbolt.org/z/o8GdMKrae
riscv64: https://godbolt.org/z/Yh5bPz56z
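
For illustration, here is a minimal IR sketch of the multi-use pattern the removed
restriction used to block (the function and value names are invented; the new tests
added below exercise the same shape):

  define void @shl_add_multi_use(ptr %p, i64 %x, i64 %v) {
  entry:
    ; Illustrative sketch only. %add has two uses: the shifted index below and
    ; the value stored at the end, so the old N0->hasOneUse() check failed.
    %add = add nsw i64 %x, 5
    %idx = shl i64 %add, 3                          ; (shl (add_nsw x, 5), 3)
    %gep = getelementptr inbounds i8, ptr %p, i64 %idx
    store i64 %v, ptr %gep
    store i64 %add, ptr %p
    ret void
  }

With the one-use check gone, the combine can still rewrite the shift as
(add (shl x, 3), 40) whenever the target's isDesirableToCommuteWithShift hook agrees;
on RISC-V the hook now allows it when the extra uses of the add are loads or stores,
so the constant lands in the store's immediate offset (see the new tests below).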

Added: 
    llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
    llvm/test/CodeGen/RISCV/add_shl_constant.ll

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll


################################################################################
diff  --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 87f026f718dd1f..aaab209bfa75d6 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4304,6 +4304,12 @@ class TargetLowering : public TargetLoweringBase {
   /// @param Level the current DAGCombine legalization level.
   virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                              CombineLevel Level) const {
+    SDValue ShiftLHS = N->getOperand(0);
+    if (!ShiftLHS->hasOneUse())
+      return false;
+    if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+        !ShiftLHS.getOperand(0)->hasOneUse())
+      return false;
     return true;
   }
 

diff  --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3bdf15b08e0f31..605937503407a8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10233,7 +10233,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // Variant of version done on multiply, except mul by a power of 2 is turned
   // into a shift.
   if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) &&
-      N0->hasOneUse() && TLI.isDesirableToCommuteWithShift(N, Level)) {
+      TLI.isDesirableToCommuteWithShift(N, Level)) {
     SDValue N01 = N0.getOperand(1);
     if (SDValue Shl1 =
             DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) {
@@ -10252,8 +10252,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // TODO: Should we limit this with isLegalAddImmediate?
   if (N0.getOpcode() == ISD::SIGN_EXTEND &&
       N0.getOperand(0).getOpcode() == ISD::ADD &&
-      N0.getOperand(0)->getFlags().hasNoSignedWrap() && N0->hasOneUse() &&
-      N0.getOperand(0)->hasOneUse() &&
+      N0.getOperand(0)->getFlags().hasNoSignedWrap() &&
       TLI.isDesirableToCommuteWithShift(N, Level)) {
     SDValue Add = N0.getOperand(0);
     SDLoc DL(N0);

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d1354ccf376609..cee609ed1e2f6f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -17931,6 +17931,13 @@ AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   SDValue ShiftLHS = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
+  if (!ShiftLHS->hasOneUse())
+    return false;
+
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
+    return false;
+
   // If ShiftLHS is unsigned bit extraction: ((x >> C) & mask), then do not
   // combine it with shift 'N' to let it be lowered to UBFX except:
   // ((x >> C) & mask) << C.

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d77508227b076b..48e9af9fe507fb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -1072,6 +1072,15 @@ bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
+
+  SDValue ShiftLHS = N->getOperand(0);
+  if (!ShiftLHS->hasOneUse())
+    return false;
+
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
+    return false;
+
   // Always commute pre-type legalization and right shifts.
   // We're looking for shl(or(x,y),z) patterns.
   if (Level < CombineLevel::AfterLegalizeTypes ||

diff  --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index c9250e4ed3422c..595bb6e73f453c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13826,6 +13826,14 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
           N->getOpcode() == ISD::SRL) &&
          "Expected shift op");
 
+  SDValue ShiftLHS = N->getOperand(0);
+  if (!ShiftLHS->hasOneUse())
+    return false;
+
+  if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
+      !ShiftLHS.getOperand(0)->hasOneUse())
+    return false;
+
   if (Level == BeforeLegalizeTypes)
     return true;
 

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 40c2a68e40853d..46dedcc3e09cf2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18233,8 +18233,46 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
   SDValue N0 = N->getOperand(0);
   EVT Ty = N0.getValueType();
+
+  // Loads/stores can fold a constant offset into their addressing mode, so if
+  // the other users of the add node are loads or stores, the folding performed
+  // above is still profitable.
+  auto isUsedByLdSt = [&]() {
+    bool CanOptAlways = false;
+    if (N0->getOpcode() == ISD::ADD && !N0->hasOneUse()) {
+      for (SDNode *Use : N0->uses()) {
+        // Skip the shift node we are currently combining with, and selects.
+        if (Use == N || Use->getOpcode() == ISD::SELECT)
+          continue;
+        if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+          CanOptAlways = false;
+          break;
+        }
+        CanOptAlways = true;
+      }
+    }
+
+    if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+        !N0->getOperand(0)->hasOneUse()) {
+      for (SDNode *Use : N0->getOperand(0)->uses()) {
+        // Skip the sign_extend we are looking through, and selects.
+        if (Use == N0.getNode() || Use->getOpcode() == ISD::SELECT)
+          continue;
+        if (!isa<StoreSDNode>(Use) && !isa<LoadSDNode>(Use)) {
+          CanOptAlways = false;
+          break;
+        }
+        CanOptAlways = true;
+      }
+    }
+    return CanOptAlways;
+  };
+
   if (Ty.isScalarInteger() &&
       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
+    if (N0.getOpcode() == ISD::ADD && !N0->hasOneUse())
+      return isUsedByLdSt();
+
     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
     if (C1 && C2) {
@@ -18269,6 +18307,15 @@ bool RISCVTargetLowering::isDesirableToCommuteWithShift(
         return false;
     }
   }
+
+  if (!N0->hasOneUse())
+    return false;
+
+  if (N0->getOpcode() == ISD::SIGN_EXTEND &&
+      N0->getOperand(0)->getOpcode() == ISD::ADD &&
+      !N0->getOperand(0)->hasOneUse())
+    return isUsedByLdSt();
+
   return true;
 }
 

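Before the new tests, a contrasting minimal sketch (invented names, not part of this
patch's test files) of a case the RISC-V hook still rejects: the add's extra use is
neither a load nor a store (at the DAG level the return becomes a CopyToReg, not a
memory access), so isUsedByLdSt() returns false and the shift is not commuted:

  define i64 @shl_add_multi_use_no_ldst(ptr %p, i64 %x, i64 %v) {
  entry:
    ; Illustrative sketch only. The second use of %add feeds the return, not a
    ; load/store, so the combine leaves (shl (add_nsw x, 5), 3) alone.
    %add = add nsw i64 %x, 5
    %idx = shl i64 %add, 3
    %gep = getelementptr inbounds i8, ptr %p, i64 %idx
    store i64 %v, ptr %gep
    ret i64 %add
  }
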
diff  --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
new file mode 100644
index 00000000000000..47b6c07cc699e7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -0,0 +1,182 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 < %s | FileCheck -check-prefix=RV64 %s
+
+define void @add_sext_shl_moreOneUse_add(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a3, a1, 5
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 20(a0)
+; RV64-NEXT:    sw a2, 24(a0)
+; RV64-NEXT:    sw a3, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %add3 = add nsw i32 %a, 6
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+  store i32 %b, ptr %arrayidx5
+  %add6 = add nsw i32 %a, 35
+  %idxprom7 = sext i32 %add6 to i64
+  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+  store i32 %add, ptr %arrayidx8
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_addexceedsign12(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_addexceedsign12:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a3, a1, 2047
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    addi a3, a3, 1
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a0, a4
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    sw a2, 0(a0)
+; RV64-NEXT:    sw a3, 4(a0)
+; RV64-NEXT:    sw a2, 120(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %0 = sext i32 %a to i64
+  %1 = getelementptr i32, ptr %array1, i64 %0
+  %arrayidx3 = getelementptr i8, ptr %1, i64 8196
+  store i32 %add, ptr %arrayidx3
+  %arrayidx6 = getelementptr i8, ptr %1, i64 8312
+  store i32 %b, ptr %arrayidx6
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_sext(ptr %array1, i32 %a, i32 %b) {
+; RV64-LABEL: add_sext_shl_moreOneUse_sext:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    addi a3, a1, 5
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a2, 20(a0)
+; RV64-NEXT:    sw a2, 24(a0)
+; RV64-NEXT:    sd a3, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  store i32 %b, ptr %arrayidx
+  %add3 = add nsw i32 %a, 6
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
+  store i32 %b, ptr %arrayidx5
+  %add6 = add nsw i32 %a, 35
+  %idxprom7 = sext i32 %add6 to i64
+  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
+  store i64 %idxprom, ptr %arrayidx8
+  ret void
+}
+
+; Test with a jump (the select lowers to a branch): the add has more than one
+; use but can still be simplified.
+define void @add_sext_shl_moreOneUse_add_inSelect(ptr %array1, i32 signext  %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a4, a1, 5
+; RV64-NEXT:    mv a5, a4
+; RV64-NEXT:    bgtz a3, .LBB3_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a5, a2
+; RV64-NEXT:  .LBB3_2: # %entry
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sw a5, 20(a0)
+; RV64-NEXT:    sw a5, 24(a0)
+; RV64-NEXT:    sw a4, 140(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %cmp = icmp sgt i32 %x, 0
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  %add.b = select i1 %cmp, i32 %add, i32 %b
+  store i32 %add.b, ptr %arrayidx
+  %add5 = add nsw i32 %a, 6
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
+  store i32 %add.b, ptr %arrayidx7
+  %add8 = add nsw i32 %a, 35
+  %idxprom9 = sext i32 %add8 to i64
+  %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
+  store i32 %add, ptr %arrayidx10
+  ret void
+}
+
+define void @add_sext_shl_moreOneUse_add_inSelect_addexceedsign12(ptr %array1, i32 signext  %a, i32 %b, i32 signext %x) {
+; RV64-LABEL: add_sext_shl_moreOneUse_add_inSelect_addexceedsign12:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a4, a1, 2047
+; RV64-NEXT:    lui a5, 2
+; RV64-NEXT:    slli a6, a1, 2
+; RV64-NEXT:    addi a1, a4, 1
+; RV64-NEXT:    add a0, a0, a6
+; RV64-NEXT:    add a0, a0, a5
+; RV64-NEXT:    mv a4, a1
+; RV64-NEXT:    bgtz a3, .LBB4_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a4, a2
+; RV64-NEXT:  .LBB4_2: # %entry
+; RV64-NEXT:    sw a4, 0(a0)
+; RV64-NEXT:    sw a4, 4(a0)
+; RV64-NEXT:    sw a1, 120(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %cmp = icmp sgt i32 %x, 0
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
+  %add.b = select i1 %cmp, i32 %add, i32 %b
+  store i32 %add.b, ptr %arrayidx
+  %0 = sext i32 %a to i64
+  %1 = getelementptr i32, ptr %array1, i64 %0
+  %arrayidx7 = getelementptr i8, ptr %1, i64 8196
+  store i32 %add.b, ptr %arrayidx7
+  %arrayidx10 = getelementptr i8, ptr %1, i64 8312
+  store i32 %add, ptr %arrayidx10
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i64 %a, i64 %b, i64 %x) {
+; RV64-LABEL: add_shl_moreOneUse_inSelect:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi a4, a1, 5
+; RV64-NEXT:    mv a5, a4
+; RV64-NEXT:    bgtz a3, .LBB5_2
+; RV64-NEXT:  # %bb.1: # %entry
+; RV64-NEXT:    mv a5, a2
+; RV64-NEXT:  .LBB5_2: # %entry
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a0, a1, a0
+; RV64-NEXT:    sd a5, 40(a0)
+; RV64-NEXT:    sd a5, 48(a0)
+; RV64-NEXT:    sd a4, 280(a0)
+; RV64-NEXT:    ret
+entry:
+  %add = add nsw i64 %a, 5
+  %cmp = icmp sgt i64 %x, 0
+  %spec.select = select i1 %cmp, i64 %add, i64 %b
+  %0 = getelementptr inbounds i64, ptr %array1, i64 %add
+  store i64 %spec.select, ptr %0
+  %add3 = add nsw i64 %a, 6
+  %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
+  store i64 %spec.select, ptr %arrayidx4
+  %add5 = add nsw i64 %a, 35
+  %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
+  store i64 %add, ptr %arrayidx6
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/add_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
new file mode 100644
index 00000000000000..71b61868b8c844
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add_shl_constant.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32  < %s | FileCheck -check-prefix=RV32 %s
+
+define i32 @add_shl_oneUse(i32 %x, i32 %y) nounwind {
+; RV32-LABEL: add_shl_oneUse:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    addi a0, a0, 984
+; RV32-NEXT:    ret
+  %add.0 = add i32 %x, 123
+  %shl = shl i32 %add.0, 3
+  %add.1 = add i32 %shl, %y
+  ret i32 %add.1
+}
+
+define void @add_shl_moreOneUse_inStore(ptr %array1, i32 %a, i32 %b)  {
+; RV32-LABEL: add_shl_moreOneUse_inStore:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a1, 5
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a2, 20(a0)
+; RV32-NEXT:    sw a2, 24(a0)
+; RV32-NEXT:    sw a3, 140(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %b, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx3 = getelementptr i8, ptr %0, i32 24
+  store i32 %b, ptr %arrayidx3
+  %arrayidx5 = getelementptr i8, ptr %0, i32 140
+  store i32 %add, ptr %arrayidx5
+  ret void
+}
+
+define void @add_shl_moreOneUse_inStore_addexceedsign12(ptr %array1, i32 %a, i32 %b)  {
+; RV32-LABEL: add_shl_moreOneUse_inStore_addexceedsign12:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a3, a1, 2047
+; RV32-NEXT:    lui a4, 2
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    addi a3, a3, 1
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    add a0, a0, a4
+; RV32-NEXT:    sw a2, 0(a0)
+; RV32-NEXT:    sw a3, 4(a0)
+; RV32-NEXT:    sw a2, 120(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %b, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx2 = getelementptr i8, ptr %0, i32 8196
+  store i32 %add, ptr %arrayidx2
+  %arrayidx4 = getelementptr i8, ptr %0, i32 8312
+  store i32 %b, ptr %arrayidx4
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a4, a1, 5
+; RV32-NEXT:    mv a5, a4
+; RV32-NEXT:    bgtz a3, .LBB3_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a5, a2
+; RV32-NEXT:  .LBB3_2: # %entry
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    sw a5, 20(a0)
+; RV32-NEXT:    sw a5, 24(a0)
+; RV32-NEXT:    sw a4, 140(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 5
+  %cmp = icmp sgt i32 %x, 0
+  %cond = select i1 %cmp, i32 %add, i32 %b
+  %arrayidx = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %cond, ptr %arrayidx
+  %0 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx2 = getelementptr i32, ptr %0, i32 6
+  store i32 %cond, ptr %arrayidx2
+  %arrayidx4 = getelementptr i32, ptr %0, i32 35
+  store i32 %add, ptr %arrayidx4
+  ret void
+}
+
+define void @add_shl_moreOneUse_inSelect_addexceedsign12(ptr %array1, i32 %a, i32 %b, i32 %x) {
+; RV32-LABEL: add_shl_moreOneUse_inSelect_addexceedsign12:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi a4, a1, 2047
+; RV32-NEXT:    addi a4, a4, 1
+; RV32-NEXT:    mv a5, a4
+; RV32-NEXT:    bgtz a3, .LBB4_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a5, a2
+; RV32-NEXT:  .LBB4_2: # %entry
+; RV32-NEXT:    lui a2, 2
+; RV32-NEXT:    slli a1, a1, 2
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    sw a5, 0(a0)
+; RV32-NEXT:    sw a5, 4(a0)
+; RV32-NEXT:    sw a4, 120(a0)
+; RV32-NEXT:    ret
+entry:
+  %add = add nsw i32 %a, 2048
+  %cmp = icmp sgt i32 %x, 0
+  %spec.select = select i1 %cmp, i32 %add, i32 %b
+  %0 = getelementptr inbounds i32, ptr %array1, i32 %add
+  store i32 %spec.select, ptr %0, align 4
+  %1 = getelementptr i32, ptr %array1, i32 %a
+  %arrayidx4 = getelementptr i8, ptr %1, i32 8196
+  store i32 %spec.select, ptr %arrayidx4
+  %arrayidx6 = getelementptr i8, ptr %1, i32 8312
+  store i32 %add, ptr %arrayidx6
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll b/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
deleted file mode 100644
index c1e7b682200eb1..00000000000000
--- a/llvm/test/CodeGen/RISCV/riscv-shifted-extend.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefix=RV64 %s
-
-define void @test(ptr nocapture noundef writeonly %array1, i32 noundef signext %a, i32 noundef signext %b) {
-; RV64-LABEL: test:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a3, a1, 5
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    slli a4, a3, 2
-; RV64-NEXT:    add a1, a1, a0
-; RV64-NEXT:    add a0, a0, a4
-; RV64-NEXT:    sw a2, 0(a0)
-; RV64-NEXT:    sw a2, 24(a1)
-; RV64-NEXT:    sw a3, 140(a1)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i32 %a, 5
-  %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
-  store i32 %b, ptr %arrayidx, align 4
-  %add3 = add nsw i32 %a, 6
-  %idxprom4 = sext i32 %add3 to i64
-  %arrayidx5 = getelementptr inbounds i32, ptr %array1, i64 %idxprom4
-  store i32 %b, ptr %arrayidx5, align 4
-  %add6 = add nsw i32 %a, 35
-  %idxprom7 = sext i32 %add6 to i64
-  %arrayidx8 = getelementptr inbounds i32, ptr %array1, i64 %idxprom7
-  store i32 %add, ptr %arrayidx8, align 4
-  ret void
-}
-
-; test of jumpping, find add's operand has one more use can simplified
-define void @test1(ptr nocapture noundef %array1, i32 noundef signext %a, i32 noundef signext %b, i32 noundef signext %x) {
-; RV64-LABEL: test1:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addiw a4, a1, 5
-; RV64-NEXT:    slli a5, a4, 2
-; RV64-NEXT:    add a5, a0, a5
-; RV64-NEXT:    mv a6, a4
-; RV64-NEXT:    bgtz a3, .LBB1_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a6, a2
-; RV64-NEXT:  .LBB1_2: # %entry
-; RV64-NEXT:    slli a1, a1, 2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sw a6, 0(a5)
-; RV64-NEXT:    sw a6, 24(a0)
-; RV64-NEXT:    sw a4, 140(a0)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i32 %a, 5
-  %cmp = icmp sgt i32 %x, 0
-  %idxprom = sext i32 %add to i64
-  %arrayidx = getelementptr inbounds i32, ptr %array1, i64 %idxprom
-  %add.b = select i1 %cmp, i32 %add, i32 %b
-  store i32 %add.b, ptr %arrayidx, align 4
-  %add5 = add nsw i32 %a, 6
-  %idxprom6 = sext i32 %add5 to i64
-  %arrayidx7 = getelementptr inbounds i32, ptr %array1, i64 %idxprom6
-  store i32 %add.b, ptr %arrayidx7, align 4
-  %add8 = add nsw i32 %a, 35
-  %idxprom9 = sext i32 %add8 to i64
-  %arrayidx10 = getelementptr inbounds i32, ptr %array1, i64 %idxprom9
-  store i32 %add, ptr %arrayidx10, align 4
-  ret void
-}
-
-define void @test2(ptr nocapture noundef writeonly %array1, i64 noundef %a, i64 noundef %b) local_unnamed_addr #0 {
-; RV64-LABEL: test2:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a3, a1, 5
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    slli a4, a3, 3
-; RV64-NEXT:    add a1, a1, a0
-; RV64-NEXT:    add a0, a0, a4
-; RV64-NEXT:    sd a2, 0(a0)
-; RV64-NEXT:    sd a2, 48(a1)
-; RV64-NEXT:    sd a3, 280(a1)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i64 %a, 5
-  %arrayidx = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %b, ptr %arrayidx, align 8
-  %add2 = add nsw i64 %a, 6
-  %arrayidx3 = getelementptr inbounds i64, ptr %array1, i64 %add2
-  store i64 %b, ptr %arrayidx3, align 8
-  %add4 = add nsw i64 %a, 35
-  %arrayidx5 = getelementptr inbounds i64, ptr %array1, i64 %add4
-  store i64 %add, ptr %arrayidx5, align 8
-  ret void
-}
-
-define void @test3(ptr nocapture noundef %array1, i64 noundef %a, i64 noundef %b, i64 noundef %x) {
-; RV64-LABEL: test3:
-; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    addi a4, a1, 5
-; RV64-NEXT:    mv a5, a4
-; RV64-NEXT:    bgtz a3, .LBB3_2
-; RV64-NEXT:  # %bb.1: # %entry
-; RV64-NEXT:    mv a5, a2
-; RV64-NEXT:  .LBB3_2: # %entry
-; RV64-NEXT:    slli a2, a4, 3
-; RV64-NEXT:    slli a1, a1, 3
-; RV64-NEXT:    add a2, a0, a2
-; RV64-NEXT:    add a0, a1, a0
-; RV64-NEXT:    sd a5, 0(a2)
-; RV64-NEXT:    sd a5, 48(a0)
-; RV64-NEXT:    sd a4, 280(a0)
-; RV64-NEXT:    ret
-entry:
-  %add = add nsw i64 %a, 5
-  %cmp = icmp sgt i64 %x, 0
-  %spec.select = select i1 %cmp, i64 %add, i64 %b
-  %0 = getelementptr inbounds i64, ptr %array1, i64 %add
-  store i64 %spec.select, ptr %0, align 8
-  %add3 = add nsw i64 %a, 6
-  %arrayidx4 = getelementptr inbounds i64, ptr %array1, i64 %add3
-  store i64 %spec.select, ptr %arrayidx4, align 8
-  %add5 = add nsw i64 %a, 35
-  %arrayidx6 = getelementptr inbounds i64, ptr %array1, i64 %add5
-  store i64 %add, ptr %arrayidx6, align 8
-  ret void
-}


        

