[llvm] e100d2b - [DAGCombiner] Fold subtraction if above a constant threshold to `umin` (#135194)

via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 11 06:00:45 PDT 2025


Author: Piotr Fusik
Date: 2025-04-11T15:00:40+02:00
New Revision: e100d2bf9ad89139efe5b3edc441c2c67cc42bf4

URL: https://github.com/llvm/llvm-project/commit/e100d2bf9ad89139efe5b3edc441c2c67cc42bf4
DIFF: https://github.com/llvm/llvm-project/commit/e100d2bf9ad89139efe5b3edc441c2c67cc42bf4.diff

LOG: [DAGCombiner] Fold subtraction if above a constant threshold to `umin` (#135194)

Like #134235, but with a constant.
It's a pattern in Adler-32 checksum calculation in zlib.

Example:

    unsigned adler32_mod(unsigned x) {
      return x >= 65521u ? x - 65521u : x;
    }

Before, on RISC-V:

    lui     a1, 16
    lui     a2, 1048560
    addiw   a1, a1, -16
    sltu    a1, a1, a0
    negw    a1, a1
    addi    a2, a2, 15
    and     a1, a1, a2
    addw    a0, a0, a1

Or, with Zicond:

    lui     a1, 16
    lui     a2, 1048560
    addiw   a1, a1, -16
    sltu    a1, a1, a0
    addi    a2, a2, 15
    czero.eqz  a1, a2, a1
    addw    a0, a0, a1

After, with Zbb:

    lui     a1, 1048560
    addi    a1, a1, 15
    addw    a1, a0, a1
    minu    a0, a1, a0

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/RISCV/rv32zbb.ll
    llvm/test/CodeGen/RISCV/rv64zbb.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 967284944c658..b322fe670d4a7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -845,6 +845,13 @@ namespace {
       return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations);
     }
 
+    bool hasUMin(EVT VT) const {
+      auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
+      return (LK.first == TargetLoweringBase::TypeLegal ||
+              LK.first == TargetLoweringBase::TypePromoteInteger) &&
+             TLI.isOperationLegal(ISD::UMIN, LK.second);
+    }
+
   public:
     /// Runs the dag combiner on all nodes in the work list
     void Run(CombineLevel AtLevel);
@@ -4253,10 +4260,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
 
   // (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y))
   // (sub x, (select (uge x, y), y, 0)) -> (umin x, (sub x, y))
-  auto LK = TLI.getTypeConversion(*DAG.getContext(), VT);
-  if ((LK.first == TargetLoweringBase::TypeLegal ||
-       LK.first == TargetLoweringBase::TypePromoteInteger) &&
-      TLI.isOperationLegal(ISD::UMIN, LK.second)) {
+  if (hasUMin(VT)) {
     SDValue Y;
     if (sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
                                                m_SpecificCondCode(ISD::SETULT)),
@@ -12074,6 +12078,17 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
 
     if (SDValue NewSel = SimplifySelect(DL, N0, N1, N2))
       return NewSel;
+
+    // (select (ugt x, C), (add x, ~C), x) -> (umin (add x, ~C), x)
+    // (select (ult x, C), x, (add x, -C)) -> (umin x, (add x, -C))
+    APInt C;
+    if (sd_match(Cond1, m_ConstInt(C)) && hasUMin(VT)) {
+      if ((CC == ISD::SETUGT && Cond0 == N2 &&
+           sd_match(N1, m_Add(m_Specific(N2), m_SpecificInt(~C)))) ||
+          (CC == ISD::SETULT && Cond0 == N1 &&
+           sd_match(N2, m_Add(m_Specific(N1), m_SpecificInt(-C)))))
+        return DAG.getNode(ISD::UMIN, DL, VT, N1, N2);
+    }
   }
 
   if (!VT.isVector())

diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 5ac0add5ecfb6..1b9b1b89aeb7e 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -1720,13 +1720,20 @@ define i32 @sub_if_uge_multiuse_cmp_store_i32(i32 %x, i32 %y, ptr %z) {
 }
 
 define i8 @sub_if_uge_C_i8(i8 zeroext %x) {
-; CHECK-LABEL: sub_if_uge_C_i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltiu a1, a0, 13
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    andi a1, a1, -13
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a1, a0, 13
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    andi a1, a1, -13
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    addi a1, a0, -13
+; RV32ZBB-NEXT:    zext.b a1, a1
+; RV32ZBB-NEXT:    minu a0, a1, a0
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ugt i8 %x, 12
   %sub = add i8 %x, -13
   %conv4 = select i1 %cmp, i8 %sub, i8 %x
@@ -1734,13 +1741,20 @@ define i8 @sub_if_uge_C_i8(i8 zeroext %x) {
 }
 
 define i16 @sub_if_uge_C_i16(i16 zeroext %x) {
-; CHECK-LABEL: sub_if_uge_C_i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltiu a1, a0, 251
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    andi a1, a1, -251
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_i16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sltiu a1, a0, 251
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    andi a1, a1, -251
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_i16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    addi a1, a0, -251
+; RV32ZBB-NEXT:    zext.h a1, a1
+; RV32ZBB-NEXT:    minu a0, a1, a0
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ugt i16 %x, 250
   %sub = add i16 %x, -251
   %conv4 = select i1 %cmp, i16 %sub, i16 %x
@@ -1748,17 +1762,25 @@ define i16 @sub_if_uge_C_i16(i16 zeroext %x) {
 }
 
 define i32 @sub_if_uge_C_i32(i32 signext %x) {
-; CHECK-LABEL: sub_if_uge_C_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    addi a1, a1, -16
-; CHECK-NEXT:    sltu a1, a1, a0
-; CHECK-NEXT:    neg a1, a1
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    lui a2, 1048560
+; RV32I-NEXT:    addi a1, a1, -16
+; RV32I-NEXT:    sltu a1, a1, a0
+; RV32I-NEXT:    neg a1, a1
+; RV32I-NEXT:    addi a2, a2, 15
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a1, a1, 15
+; RV32ZBB-NEXT:    add a1, a0, a1
+; RV32ZBB-NEXT:    minu a0, a1, a0
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ugt i32 %x, 65520
   %sub = add i32 %x, -65521
   %cond = select i1 %cmp, i32 %sub, i32 %x
@@ -1797,18 +1819,30 @@ define i64 @sub_if_uge_C_i64(i64 %x) {
 }
 
 define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
-; CHECK-LABEL: sub_if_uge_C_multiuse_cmp_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a2, 16
-; CHECK-NEXT:    lui a3, 1048560
-; CHECK-NEXT:    addi a2, a2, -16
-; CHECK-NEXT:    sltu a2, a2, a0
-; CHECK-NEXT:    neg a4, a2
-; CHECK-NEXT:    addi a3, a3, 15
-; CHECK-NEXT:    and a3, a4, a3
-; CHECK-NEXT:    add a0, a0, a3
-; CHECK-NEXT:    sw a2, 0(a1)
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_multiuse_cmp_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 16
+; RV32I-NEXT:    lui a3, 1048560
+; RV32I-NEXT:    addi a2, a2, -16
+; RV32I-NEXT:    sltu a2, a2, a0
+; RV32I-NEXT:    neg a4, a2
+; RV32I-NEXT:    addi a3, a3, 15
+; RV32I-NEXT:    and a3, a4, a3
+; RV32I-NEXT:    add a0, a0, a3
+; RV32I-NEXT:    sw a2, 0(a1)
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_multiuse_cmp_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a2, 16
+; RV32ZBB-NEXT:    lui a3, 1048560
+; RV32ZBB-NEXT:    addi a2, a2, -16
+; RV32ZBB-NEXT:    addi a3, a3, 15
+; RV32ZBB-NEXT:    sltu a2, a2, a0
+; RV32ZBB-NEXT:    add a3, a0, a3
+; RV32ZBB-NEXT:    minu a0, a3, a0
+; RV32ZBB-NEXT:    sw a2, 0(a1)
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ugt i32 %x, 65520
   %conv = zext i1 %cmp to i32
   store i32 %conv, ptr %z, align 4
@@ -1818,20 +1852,29 @@ define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
 }
 
 define i32 @sub_if_uge_C_multiuse_sub_i32(i32 signext %x, ptr %z) {
-; CHECK-LABEL: sub_if_uge_C_multiuse_sub_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    lui a3, 16
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    add a2, a0, a2
-; CHECK-NEXT:    addi a3, a3, -16
-; CHECK-NEXT:    sw a2, 0(a1)
-; CHECK-NEXT:    bltu a3, a0, .LBB62_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:  .LBB62_2:
-; CHECK-NEXT:    mv a0, a2
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_multiuse_sub_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a2, 1048560
+; RV32I-NEXT:    lui a3, 16
+; RV32I-NEXT:    addi a2, a2, 15
+; RV32I-NEXT:    add a2, a0, a2
+; RV32I-NEXT:    addi a3, a3, -16
+; RV32I-NEXT:    sw a2, 0(a1)
+; RV32I-NEXT:    bltu a3, a0, .LBB62_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:  .LBB62_2:
+; RV32I-NEXT:    mv a0, a2
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_multiuse_sub_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a2, 1048560
+; RV32ZBB-NEXT:    addi a2, a2, 15
+; RV32ZBB-NEXT:    add a2, a0, a2
+; RV32ZBB-NEXT:    minu a0, a2, a0
+; RV32ZBB-NEXT:    sw a2, 0(a1)
+; RV32ZBB-NEXT:    ret
   %sub = add i32 %x, -65521
   store i32 %sub, ptr %z, align 4
   %cmp = icmp ugt i32 %x, 65520
@@ -1840,17 +1883,25 @@ define i32 @sub_if_uge_C_multiuse_sub_i32(i32 signext %x, ptr %z) {
 }
 
 define i32 @sub_if_uge_C_swapped_i32(i32 %x) {
-; CHECK-LABEL: sub_if_uge_C_swapped_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    addi a1, a1, -15
-; CHECK-NEXT:    sltu a1, a0, a1
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV32I-LABEL: sub_if_uge_C_swapped_i32:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 16
+; RV32I-NEXT:    lui a2, 1048560
+; RV32I-NEXT:    addi a1, a1, -15
+; RV32I-NEXT:    sltu a1, a0, a1
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    addi a2, a2, 15
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: sub_if_uge_C_swapped_i32:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a1, a1, 15
+; RV32ZBB-NEXT:    add a1, a0, a1
+; RV32ZBB-NEXT:    minu a0, a0, a1
+; RV32ZBB-NEXT:    ret
   %cmp = icmp ult i32 %x, 65521
   %sub = add i32 %x, -65521
   %cond = select i1 %cmp, i32 %x, i32 %sub

diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 149ea9aa575bf..25325ad7d50a4 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -1886,13 +1886,20 @@ define i32 @sub_if_uge_multiuse_cmp_store_i32(i32 signext %x, i32 signext %y, pt
 }
 
 define i8 @sub_if_uge_C_i8(i8 zeroext %x) {
-; CHECK-LABEL: sub_if_uge_C_i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltiu a1, a0, 13
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    andi a1, a1, -13
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltiu a1, a0, 13
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    andi a1, a1, -13
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    addi a1, a0, -13
+; RV64ZBB-NEXT:    zext.b a1, a1
+; RV64ZBB-NEXT:    minu a0, a1, a0
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ugt i8 %x, 12
   %sub = add i8 %x, -13
   %conv4 = select i1 %cmp, i8 %sub, i8 %x
@@ -1900,13 +1907,20 @@ define i8 @sub_if_uge_C_i8(i8 zeroext %x) {
 }
 
 define i16 @sub_if_uge_C_i16(i16 zeroext %x) {
-; CHECK-LABEL: sub_if_uge_C_i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    sltiu a1, a0, 251
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    andi a1, a1, -251
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_i16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sltiu a1, a0, 251
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    andi a1, a1, -251
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_i16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    addi a1, a0, -251
+; RV64ZBB-NEXT:    zext.h a1, a1
+; RV64ZBB-NEXT:    minu a0, a1, a0
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ugt i16 %x, 250
   %sub = add i16 %x, -251
   %conv4 = select i1 %cmp, i16 %sub, i16 %x
@@ -1914,17 +1928,25 @@ define i16 @sub_if_uge_C_i16(i16 zeroext %x) {
 }
 
 define i32 @sub_if_uge_C_i32(i32 signext %x) {
-; CHECK-LABEL: sub_if_uge_C_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    addiw a1, a1, -16
-; CHECK-NEXT:    sltu a1, a1, a0
-; CHECK-NEXT:    negw a1, a1
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    addw a0, a0, a1
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    lui a2, 1048560
+; RV64I-NEXT:    addiw a1, a1, -16
+; RV64I-NEXT:    sltu a1, a1, a0
+; RV64I-NEXT:    negw a1, a1
+; RV64I-NEXT:    addi a2, a2, 15
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addi a1, a1, 15
+; RV64ZBB-NEXT:    addw a1, a0, a1
+; RV64ZBB-NEXT:    minu a0, a1, a0
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ugt i32 %x, 65520
   %sub = add i32 %x, -65521
   %cond = select i1 %cmp, i32 %sub, i32 %x
@@ -1932,20 +1954,29 @@ define i32 @sub_if_uge_C_i32(i32 signext %x) {
 }
 
 define i64 @sub_if_uge_C_i64(i64 %x) {
-; CHECK-LABEL: sub_if_uge_C_i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, 298
-; CHECK-NEXT:    lui a2, 1046192
-; CHECK-NEXT:    addiw a1, a1, 95
-; CHECK-NEXT:    addiw a2, a2, -761
-; CHECK-NEXT:    slli a1, a1, 12
-; CHECK-NEXT:    addi a1, a1, 511
-; CHECK-NEXT:    sltu a1, a1, a0
-; CHECK-NEXT:    neg a1, a1
-; CHECK-NEXT:    slli a2, a2, 9
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    add a0, a0, a1
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 298
+; RV64I-NEXT:    lui a2, 1046192
+; RV64I-NEXT:    addiw a1, a1, 95
+; RV64I-NEXT:    addiw a2, a2, -761
+; RV64I-NEXT:    slli a1, a1, 12
+; RV64I-NEXT:    addi a1, a1, 511
+; RV64I-NEXT:    sltu a1, a1, a0
+; RV64I-NEXT:    neg a1, a1
+; RV64I-NEXT:    slli a2, a2, 9
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_i64:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, 1046192
+; RV64ZBB-NEXT:    addiw a1, a1, -761
+; RV64ZBB-NEXT:    slli a1, a1, 9
+; RV64ZBB-NEXT:    add a1, a0, a1
+; RV64ZBB-NEXT:    minu a0, a1, a0
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ugt i64 %x, 4999999999
   %sub = add i64 %x, -5000000000
   %cond = select i1 %cmp, i64 %sub, i64 %x
@@ -1953,18 +1984,30 @@ define i64 @sub_if_uge_C_i64(i64 %x) {
 }
 
 define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
-; CHECK-LABEL: sub_if_uge_C_multiuse_cmp_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a2, 16
-; CHECK-NEXT:    lui a3, 1048560
-; CHECK-NEXT:    addiw a2, a2, -16
-; CHECK-NEXT:    sltu a2, a2, a0
-; CHECK-NEXT:    negw a4, a2
-; CHECK-NEXT:    addi a3, a3, 15
-; CHECK-NEXT:    and a3, a4, a3
-; CHECK-NEXT:    addw a0, a0, a3
-; CHECK-NEXT:    sw a2, 0(a1)
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_multiuse_cmp_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 16
+; RV64I-NEXT:    lui a3, 1048560
+; RV64I-NEXT:    addiw a2, a2, -16
+; RV64I-NEXT:    sltu a2, a2, a0
+; RV64I-NEXT:    negw a4, a2
+; RV64I-NEXT:    addi a3, a3, 15
+; RV64I-NEXT:    and a3, a4, a3
+; RV64I-NEXT:    addw a0, a0, a3
+; RV64I-NEXT:    sw a2, 0(a1)
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_multiuse_cmp_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a2, 16
+; RV64ZBB-NEXT:    lui a3, 1048560
+; RV64ZBB-NEXT:    addiw a2, a2, -16
+; RV64ZBB-NEXT:    addi a3, a3, 15
+; RV64ZBB-NEXT:    sltu a2, a2, a0
+; RV64ZBB-NEXT:    addw a3, a0, a3
+; RV64ZBB-NEXT:    minu a0, a3, a0
+; RV64ZBB-NEXT:    sw a2, 0(a1)
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ugt i32 %x, 65520
   %conv = zext i1 %cmp to i32
   store i32 %conv, ptr %z, align 4
@@ -1974,20 +2017,29 @@ define i32 @sub_if_uge_C_multiuse_cmp_i32(i32 signext %x, ptr %z) {
 }
 
 define i32 @sub_if_uge_C_multiuse_sub_i32(i32 signext %x, ptr %z) {
-; CHECK-LABEL: sub_if_uge_C_multiuse_sub_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    lui a3, 16
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    addw a2, a0, a2
-; CHECK-NEXT:    addiw a3, a3, -16
-; CHECK-NEXT:    sw a2, 0(a1)
-; CHECK-NEXT:    bltu a3, a0, .LBB75_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a2, a0
-; CHECK-NEXT:  .LBB75_2:
-; CHECK-NEXT:    mv a0, a2
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_multiuse_sub_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a2, 1048560
+; RV64I-NEXT:    lui a3, 16
+; RV64I-NEXT:    addi a2, a2, 15
+; RV64I-NEXT:    addw a2, a0, a2
+; RV64I-NEXT:    addiw a3, a3, -16
+; RV64I-NEXT:    sw a2, 0(a1)
+; RV64I-NEXT:    bltu a3, a0, .LBB75_2
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:  .LBB75_2:
+; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_multiuse_sub_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a2, 1048560
+; RV64ZBB-NEXT:    addi a2, a2, 15
+; RV64ZBB-NEXT:    addw a2, a0, a2
+; RV64ZBB-NEXT:    minu a0, a2, a0
+; RV64ZBB-NEXT:    sw a2, 0(a1)
+; RV64ZBB-NEXT:    ret
   %sub = add i32 %x, -65521
   store i32 %sub, ptr %z, align 4
   %cmp = icmp ugt i32 %x, 65520
@@ -1996,17 +2048,25 @@ define i32 @sub_if_uge_C_multiuse_sub_i32(i32 signext %x, ptr %z) {
 }
 
 define i32 @sub_if_uge_C_swapped_i32(i32 signext %x) {
-; CHECK-LABEL: sub_if_uge_C_swapped_i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, 16
-; CHECK-NEXT:    lui a2, 1048560
-; CHECK-NEXT:    addiw a1, a1, -15
-; CHECK-NEXT:    sltu a1, a0, a1
-; CHECK-NEXT:    addi a1, a1, -1
-; CHECK-NEXT:    addi a2, a2, 15
-; CHECK-NEXT:    and a1, a1, a2
-; CHECK-NEXT:    addw a0, a0, a1
-; CHECK-NEXT:    ret
+; RV64I-LABEL: sub_if_uge_C_swapped_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, 16
+; RV64I-NEXT:    lui a2, 1048560
+; RV64I-NEXT:    addiw a1, a1, -15
+; RV64I-NEXT:    sltu a1, a0, a1
+; RV64I-NEXT:    addi a1, a1, -1
+; RV64I-NEXT:    addi a2, a2, 15
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    addw a0, a0, a1
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: sub_if_uge_C_swapped_i32:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addi a1, a1, 15
+; RV64ZBB-NEXT:    addw a1, a0, a1
+; RV64ZBB-NEXT:    minu a0, a0, a1
+; RV64ZBB-NEXT:    ret
   %cmp = icmp ult i32 %x, 65521
   %sub = add i32 %x, -65521
   %cond = select i1 %cmp, i32 %x, i32 %sub


        


More information about the llvm-commits mailing list