[llvm] [AArch64] Extend condition optimizer to support unsigned comparisons (PR #144380)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 29 06:12:06 PST 2026


https://github.com/SiliconA-Z updated https://github.com/llvm/llvm-project/pull/144380

>From 0c5e84ab6910563b717271324aef541b3a2351bb Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 22 Jan 2026 10:31:32 -0500
Subject: [PATCH 1/2] [AArch64] Add support for unsigned comparisons

We have to be extra careful not to allow unsigned wraps, however. This also required adjusting the logic in adjustCmp, as well as comparing the true immediate values (with the effect of the add or sub opcode taken into account).

Because SIGNED_MIN and SIGNED_MAX cannot be encoded as immediates, we do not need to worry about those edge cases when dealing with unsigned comparisons.
---
 .../AArch64/AArch64ConditionOptimizer.cpp     |  97 ++-
 .../AArch64/aarch64-condopt-unsigned.mir      | 430 ++++++++++++
 .../AArch64/combine-comparisons-by-cse.ll     | 663 ++++++++++++++++++
 3 files changed, 1165 insertions(+), 25 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/aarch64-condopt-unsigned.mir

diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index f428252e143ba..8b64968c558a4 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -220,10 +220,22 @@ static int getComplementOpc(int Opc) {
 // Changes form of comparison inclusive <-> exclusive.
 static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
   switch (Cmp) {
-  case AArch64CC::GT: return AArch64CC::GE;
-  case AArch64CC::GE: return AArch64CC::GT;
-  case AArch64CC::LT: return AArch64CC::LE;
-  case AArch64CC::LE: return AArch64CC::LT;
+  case AArch64CC::GT:
+    return AArch64CC::GE;
+  case AArch64CC::GE:
+    return AArch64CC::GT;
+  case AArch64CC::LT:
+    return AArch64CC::LE;
+  case AArch64CC::LE:
+    return AArch64CC::LT;
+  case AArch64CC::HI:
+    return AArch64CC::HS;
+  case AArch64CC::HS:
+    return AArch64CC::HI;
+  case AArch64CC::LO:
+    return AArch64CC::LS;
+  case AArch64CC::LS:
+    return AArch64CC::LO;
   default:
     llvm_unreachable("Unexpected condition code");
   }
@@ -231,15 +243,20 @@ static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
 
 // Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating comparison
 // operator and condition code.
-AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
-    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
+AArch64ConditionOptimizer::CmpInfo
+AArch64ConditionOptimizer::adjustCmp(MachineInstr *CmpMI,
+                                     AArch64CC::CondCode Cmp) {
   unsigned Opc = CmpMI->getOpcode();
+  unsigned OldOpc = Opc;
+
+  bool IsSigned = Cmp == AArch64CC::GT || Cmp == AArch64CC::GE ||
+                  Cmp == AArch64CC::LT || Cmp == AArch64CC::LE;
 
   // CMN (compare with negative immediate) is an alias to ADDS (as
   // "operand - negative" == "operand + positive")
   bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);
 
-  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
+  int Correction = (Cmp == AArch64CC::GT || Cmp == AArch64CC::HI) ? 1 : -1;
   // Negate Correction value for comparison with negative immediate (CMN).
   if (Negative) {
     Correction = -Correction;
@@ -248,13 +265,23 @@ AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
   const int OldImm = (int)CmpMI->getOperand(2).getImm();
   const int NewImm = std::abs(OldImm + Correction);
 
-  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
-  // adjusting compare instruction opcode.
-  if (OldImm == 0 && ((Negative && Correction == 1) ||
-                      (!Negative && Correction == -1))) {
+  // Handle cmn 1 -> cmp 0, transitions by adjusting compare instruction opcode.
+  if (OldImm == 1 && Negative && Correction == -1) {
+    // If we are adjusting from -1 to 0, we need to change the opcode.
+    Opc = getComplementOpc(Opc);
+  }
+
+  // Handle +0 -> -1 transitions by adjusting compare instruction opcode.
+  assert((OldImm != 0 || !Negative) && "Should not encounter cmn 0!");
+  if (OldImm == 0 && Correction == -1) {
     Opc = getComplementOpc(Opc);
   }
 
+  // If we change opcodes, this means we did an unsigned wrap, so return the old
+  // cmp.
+  if (!IsSigned && Opc != OldOpc)
+    return CmpInfo(OldImm, OldOpc, Cmp);
+
   return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
 }
 
@@ -333,6 +360,14 @@ bool AArch64ConditionOptimizer::isPureCmp(MachineInstr &CmpMI) {
   return true;
 }
 
+static bool isGreaterThan(AArch64CC::CondCode Cmp) {
+  return Cmp == AArch64CC::GT || Cmp == AArch64CC::HI;
+}
+
+static bool isLessThan(AArch64CC::CondCode Cmp) {
+  return Cmp == AArch64CC::LT || Cmp == AArch64CC::LO;
+}
+
 // This function transforms two CMP+CSINC pairs within the same basic block
 // when both conditions are the same (GT/GT or LT/LT) and immediates differ
 // by 1.
@@ -443,15 +478,16 @@ bool AArch64ConditionOptimizer::optimizeIntraBlock(MachineBasicBlock &MBB) {
                     << " and " << AArch64CC::getCondCodeName(SecondCond) << " #"
                     << SecondImm << '\n');
 
-  // Check if both conditions are the same and immediates differ by 1
-  if (((FirstCond == AArch64CC::GT && SecondCond == AArch64CC::GT) ||
-       (FirstCond == AArch64CC::LT && SecondCond == AArch64CC::LT)) &&
+  // Check if both conditions are the same (GT/GT, LT/LT, HI/HI, LO/LO)
+  // and immediates differ by 1.
+  if (FirstCond == SecondCond &&
+      (isGreaterThan(FirstCond) || isLessThan(FirstCond)) &&
       std::abs(SecondImm - FirstImm) == 1) {
     // Pick which comparison to adjust to match the other
-    // For GT: adjust the one with smaller immediate
-    // For LT: adjust the one with larger immediate
+    // For GT/HI: adjust the one with smaller immediate
+    // For LT/LO: adjust the one with larger immediate
     bool adjustFirst = (FirstImm < SecondImm);
-    if (FirstCond == AArch64CC::LT) {
+    if (isLessThan(FirstCond)) {
       adjustFirst = !adjustFirst;
     }
 
@@ -533,6 +569,9 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
   const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
   const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();
 
+  int HeadImmTrueValue = HeadImm;
+  int TrueImmTrueValue = TrueImm;
+
   LLVM_DEBUG(dbgs() << "Head branch:\n");
   LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
                     << '\n');
@@ -543,9 +582,17 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
                     << '\n');
   LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');
 
-  if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
-       (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
-      std::abs(TrueImm - HeadImm) == 2) {
+  unsigned Opc = HeadCmpMI->getOpcode();
+  if (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri)
+    HeadImmTrueValue = -HeadImmTrueValue;
+
+  Opc = TrueCmpMI->getOpcode();
+  if (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri)
+    TrueImmTrueValue = -TrueImmTrueValue;
+
+  if (((isGreaterThan(HeadCmp) && isLessThan(TrueCmp)) ||
+       (isLessThan(HeadCmp) && isGreaterThan(TrueCmp))) &&
+      std::abs(TrueImmTrueValue - HeadImmTrueValue) == 2) {
     // This branch transforms machine instructions that correspond to
     //
     // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
@@ -564,9 +611,9 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
       modifyCmp(TrueCmpMI, TrueCmpInfo);
       return true;
     }
-  } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
-              (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
-             std::abs(TrueImm - HeadImm) == 1) {
+  } else if (((isGreaterThan(HeadCmp) && isGreaterThan(TrueCmp)) ||
+              (isLessThan(HeadCmp) && isLessThan(TrueCmp))) &&
+             std::abs(TrueImmTrueValue - HeadImmTrueValue) == 1) {
     // This branch transforms machine instructions that correspond to
     //
     // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
@@ -579,8 +626,8 @@ bool AArch64ConditionOptimizer::optimizeCrossBlock(MachineBasicBlock &HBB) {
 
     // GT -> GE transformation increases immediate value, so picking the
     // smaller one; LT -> LE decreases immediate value so invert the choice.
-    bool adjustHeadCond = (HeadImm < TrueImm);
-    if (HeadCmp == AArch64CC::LT) {
+    bool adjustHeadCond = (HeadImmTrueValue < TrueImmTrueValue);
+    if (isLessThan(HeadCmp)) {
       adjustHeadCond = !adjustHeadCond;
     }
 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-condopt-unsigned.mir b/llvm/test/CodeGen/AArch64/aarch64-condopt-unsigned.mir
new file mode 100644
index 0000000000000..37143ffab50d1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-condopt-unsigned.mir
@@ -0,0 +1,430 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64 -o - %s -run-pass=aarch64-condopt | FileCheck %s
+
+# Test unsigned comparison optimization in AArch64ConditionOptimizer.
+#
+# Condition codes used:
+#   HI = 8  (unsigned greater than)
+#   HS = 2  (unsigned greater than or equal)
+#   LO = 3  (unsigned less than)
+#   LS = 9  (unsigned less than or equal)
+#
+# The pass transforms:
+#   (a > 10 && ...) || (a > 9 && ...)  ->  (a > 10 && ...) || (a >= 10 && ...)
+# by adjusting one compare's immediate and condition code.
+
+---
+# CMP/CMP with HI/HI (unsigned >): imm differs by 1
+# Head: cmp w0, #10; b.hi  (w0 > 10 unsigned)
+# True: cmp w0, #11; b.hi  (w0 > 11 unsigned)
+# Expected: One CMP adjusted to HS (>=)
+name:            cmp_cmp_hi_hi
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_hi_hi
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 2, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 8, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    Bcc 8, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = SUBSWri %0, 11, 0, implicit-def $nzcv
+    Bcc 8, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# CMP/CMP with LO/LO (unsigned <): imm differs by 1
+# Head: cmp w0, #10; b.lo  (w0 < 10 unsigned)
+# True: cmp w0, #11; b.lo  (w0 < 11 unsigned)
+# Expected: One CMP adjusted to LS (<=)
+name:            cmp_cmp_lo_lo
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_lo_lo
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 10, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 3, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 10, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 9, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    Bcc 3, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = SUBSWri %0, 11, 0, implicit-def $nzcv
+    Bcc 3, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# CMP/CMP with HI/LO (unsigned > and <): imm differs by 2
+# Head: cmp w0, #10; b.hi  (w0 > 10 unsigned)
+# True: cmp w0, #12; b.lo  (w0 < 12 unsigned)
+# This is the "a > 10 || a < 12" pattern -> always true for a == 11
+# Expected: Adjusted to use same immediate
+name:            cmp_cmp_hi_lo
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_hi_lo
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 2, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 9, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    Bcc 8, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = SUBSWri %0, 12, 0, implicit-def $nzcv
+    Bcc 3, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# CMN/CMN with HI/HI: using ADDSWri (compare with negated immediate)
+# Head: cmn w0, #1; b.hi  (w0 > -1 unsigned, i.e., w0 != 0xFFFFFFFF)
+# True: cmn w0, #2; b.hi  (w0 > -2 unsigned)
+name:            cmn_cmn_hi_hi
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmn_cmn_hi_hi
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[ADDSWri:%[0-9]+]]:gpr32 = ADDSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 8, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADDSWri1:%[0-9]+]]:gpr32 = ADDSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 2, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = ADDSWri %0, 1, 0, implicit-def $nzcv
+    Bcc 8, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = ADDSWri %0, 2, 0, implicit-def $nzcv
+    Bcc 8, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# CSINC/CSINC with HI/HI (unsigned >): imm differs by 1
+name:            csinc_hi_hi_intrablock
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+    ; CHECK-LABEL: name: csinc_hi_hi_intrablock
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 2, implicit $nzcv
+    ; CHECK-NEXT: [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 8, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr1]]
+    ; CHECK-NEXT: RET undef $lr, implicit $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = COPY %0
+    %2:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    %3:gpr32 = CSINCWr %1, $wzr, 8, implicit $nzcv
+    %4:gpr32 = SUBSWri %0, 11, 0, implicit-def $nzcv
+    %5:gpr32 = CSINCWr %1, $wzr, 8, implicit $nzcv
+    $w0 = COPY %5
+    RET undef $lr, implicit $w0
+...
+
+---
+# CSINC/CSINC with LO/LO (unsigned <): imm differs by 1
+name:            csinc_lo_lo_intrablock
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+    ; CHECK-LABEL: name: csinc_lo_lo_intrablock
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 10, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 3, implicit $nzcv
+    ; CHECK-NEXT: [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 10, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr1]]
+    ; CHECK-NEXT: RET undef $lr, implicit $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = COPY %0
+    %2:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    %3:gpr32 = CSINCWr %1, $wzr, 3, implicit $nzcv
+    %4:gpr32 = SUBSWri %0, 11, 0, implicit-def $nzcv
+    %5:gpr32 = CSINCWr %1, $wzr, 3, implicit $nzcv
+    $w0 = COPY %5
+    RET undef $lr, implicit $w0
+...
+
+---
+# Test: HI with immediate 0 can be optimized by adjusting upward
+# Head: cmp w0, #0; b.hi  (w0 > 0 unsigned)
+# True: cmp w0, #1; b.hi  (w0 > 1 unsigned)
+# Expected: Head adjusted to cmp #1; b.hs (w0 >= 1, equivalent to w0 > 0)
+name:            cmp_cmp_hi_hi_wrap
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_hi_hi_wrap
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 2, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 8, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = SUBSWri %0, 0, 0, implicit-def $nzcv
+    Bcc 8, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = SUBSWri %0, 1, 0, implicit-def $nzcv
+    Bcc 8, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# Mixed signed/unsigned should NOT optimize
+# Head: cmp w0, #10; b.gt  (signed >)
+# True: cmp w0, #11; b.hi  (unsigned >)
+name:            cmp_cmp_gt_hi_no_opt
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_gt_hi_no_opt
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 10, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 8, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+  ; CHECK-NEXT:   $w0 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $w0
+  bb.0:
+    liveins: $w0
+    %0:gpr32common = COPY $w0
+    %1:gpr32 = SUBSWri %0, 10, 0, implicit-def $nzcv
+    Bcc 12, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $w0
+    %2:gpr32 = SUBSWri %0, 11, 0, implicit-def $nzcv
+    Bcc 8, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $w0
+    %3:gpr32 = MOVi32imm 1
+    $w0 = COPY %3
+    RET undef $lr, implicit $w0
+...
+
+---
+# 64-bit variant: CMP/CMP with HI/HI
+name:            cmp_cmp_hi_hi_64
+tracksRegLiveness: true
+body:             |
+  ; CHECK-LABEL: name: cmp_cmp_hi_hi_64
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64common = COPY $x0
+  ; CHECK-NEXT:   [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 2, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[SUBSXri1:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 11, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 8, %bb.2, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1
+  ; CHECK-NEXT:   $x0 = COPY [[MOVi64imm]]
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  bb.0:
+    liveins: $x0
+    %0:gpr64common = COPY $x0
+    %1:gpr64 = SUBSXri %0, 10, 0, implicit-def $nzcv
+    Bcc 8, %bb.1, implicit $nzcv
+    B %bb.2
+
+  bb.1:
+    liveins: $x0
+    %2:gpr64 = SUBSXri %0, 11, 0, implicit-def $nzcv
+    Bcc 8, %bb.2, implicit $nzcv
+    B %bb.2
+
+  bb.2:
+    liveins: $x0
+    %3:gpr64 = MOVi64imm 1
+    $x0 = COPY %3
+    RET undef $lr, implicit $x0
+...
diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index c261013303aa8..27b231cb56d70 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -1018,6 +1018,669 @@ return:                                           ; preds = %if.end, %land.lhs.t
   ret i32 %retval.0
 }
 
+
+; (a > 10 && b == c) || (a >= 10 && b == d)
+define i32 @combine_ugt_uge_10() #0 {
+; CHECK-LABEL: combine_ugt_uge_10:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #10
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    b.ls .LBB16_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB16_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB16_3: // %lor.lhs.false
+; CHECK-NEXT:    b.lo .LBB16_6
+; CHECK-NEXT:  .LBB16_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB16_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB16_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 10
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 9
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a > 5 && b == c) || (a < 5 && b == d)
+define i32 @combine_ugt_ult_5() #0 {
+; CHECK-LABEL: combine_ugt_ult_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5
+; CHECK-NEXT:    b.ls .LBB17_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB17_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB17_3: // %lor.lhs.false
+; CHECK-NEXT:    b.hs .LBB17_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB17_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB17_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, 5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a < 5 && b == c) || (a <= 5 && b == d)
+define i32 @combine_ult_uge_5() #0 {
+; CHECK-LABEL: combine_ult_uge_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    b.hs .LBB18_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB18_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB18_3: // %lor.lhs.false
+; CHECK-NEXT:    b.hi .LBB18_6
+; CHECK-NEXT:  .LBB18_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB18_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB18_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, 6
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a < 5 && b == c) || (a > 5 && b == d)
+define i32 @combine_ult_ugt_5() #0 {
+; CHECK-LABEL: combine_ult_ugt_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5
+; CHECK-NEXT:    b.hs .LBB19_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB19_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB19_3: // %lor.lhs.false
+; CHECK-NEXT:    b.ls .LBB19_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB19_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB19_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, 5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a u> -5 && b == c) || (a u< -5 && b == d)  -- both compares unsigned (ugt/ult)
+define i32 @combine_ugt_ult_n5() #0 {
+; CHECK-LABEL: combine_ugt_ult_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.ls .LBB20_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB20_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB20_3: // %lor.lhs.false
+; CHECK-NEXT:    b.hs .LBB20_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB20_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB20_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ult i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; (a u< -5 && b == c) || (a u> -5 && b == d)  -- both compares unsigned (ult/ugt)
+define i32 @combine_ult_ugt_n5() #0 {
+; CHECK-LABEL: combine_ult_ugt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.hs .LBB21_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB21_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB21_3: // %lor.lhs.false
+; CHECK-NEXT:    b.ls .LBB21_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB21_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB21_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; Yes, you can mix them too!
+; (a u< -5 && b == c) || (a s> -5 && b == d)  -- unsigned ult then signed sgt
+define i32 @combine_ult_gt_n5() #0 {
+; CHECK-LABEL: combine_ult_gt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #5
+; CHECK-NEXT:    b.hs .LBB22_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB22_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB22_3: // %lor.lhs.false
+; CHECK-NEXT:    b.le .LBB22_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB22_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB22_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ult i32 %0, -5
+  br i1 %cmp, label %land.lhs.true, label %lor.lhs.false
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %if.end
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp sgt i32 %0, -5
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false, %land.lhs.true
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+; Test in the following case, we don't hit 'cmp' and trigger a false positive
+; cmp  w19, #0
+; cinc w0, w19, gt
+; ...
+; fcmp d8, #0.0
+; b.gt .LBB0_5
+
+define i32 @fcmpri_u(i32 %argc, ptr nocapture readonly %argv) #0 {
+; CHECK-LABEL: fcmpri_u:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w0, #2
+; CHECK-NEXT:    b.lo .LBB23_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    ldr x8, [x1, #8]
+; CHECK-NEXT:    cbz x8, .LBB23_3
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #3 // =0x3
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB23_3: // %if.end
+; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    .cfi_offset b8, -32
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    mov w0, #-1 // =0xffffffff
+; CHECK-NEXT:    bl yoo
+; CHECK-NEXT:    cmp w19, #0
+; CHECK-NEXT:    mov w1, #2 // =0x2
+; CHECK-NEXT:    fmov d8, d0
+; CHECK-NEXT:    cinc w0, w19, ne
+; CHECK-NEXT:    bl xoo
+; CHECK-NEXT:    fmov d0, #-1.00000000
+; CHECK-NEXT:    fcmp d8, #0.0
+; CHECK-NEXT:    fmov d1, #-2.00000000
+; CHECK-NEXT:    fadd d0, d8, d0
+; CHECK-NEXT:    fcsel d0, d8, d0, gt
+; CHECK-NEXT:    bl woo
+; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    mov w0, #4 // =0x4
+; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    .cfi_restore w19
+; CHECK-NEXT:    .cfi_restore w30
+; CHECK-NEXT:    .cfi_restore b8
+; CHECK-NEXT:    ret
+
+; CHECK-LABEL-DAG: .LBB23_3
+
+entry:
+  %cmp = icmp ugt i32 %argc, 1
+  br i1 %cmp, label %land.lhs.true, label %if.end
+
+land.lhs.true:                                    ; preds = %entry
+  %arrayidx = getelementptr inbounds ptr, ptr %argv, i64 1
+  %0 = load ptr, ptr %arrayidx, align 8
+  %cmp1 = icmp eq ptr %0, null
+  br i1 %cmp1, label %if.end, label %return
+
+if.end:                                           ; preds = %land.lhs.true, %entry
+  %call = call i32 @zoo(i32 1)
+  %call2 = call double @yoo(i32 -1)
+  %cmp4 = icmp ugt i32 %call, 0
+  %add = zext i1 %cmp4 to i32
+  %cond = add nuw i32 %add, %call
+  %call7 = call i32 @xoo(i32 %cond, i32 2)
+  %cmp9 = fcmp ogt double %call2, 0.000000e+00
+  br i1 %cmp9, label %cond.end14, label %cond.false12
+
+cond.false12:                                     ; preds = %if.end
+  %sub = fadd fast double %call2, -1.000000e+00
+  br label %cond.end14
+
+cond.end14:                                       ; preds = %if.end, %cond.false12
+  %cond15 = phi double [ %sub, %cond.false12 ], [ %call2, %if.end ]
+  %call16 = call i32 @woo(double %cond15, double -2.000000e+00)
+  br label %return
+
+return:                                           ; preds = %land.lhs.true, %cond.end14
+  %retval.0 = phi i32 [ 4, %cond.end14 ], [ 3, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
+define void @cmp_shifted_unsigned(i32 %in, i32 %lhs, i32 %rhs) #0 {
+; CHECK-LABEL: cmp_shifted_unsigned:
+; CHECK:       // %bb.0: // %common.ret
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    lsr w9, w0, #13
+; CHECK-NEXT:    mov w8, #42 // =0x2a
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csinc w8, w8, wzr, ne
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    mov w9, #128 // =0x80
+; CHECK-NEXT:    csel w0, w9, w8, ne
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    .cfi_restore w30
+; CHECK-NEXT:    ret
+; [...]
+
+  %tst_low = icmp ugt i32 %in, 8191
+  br i1 %tst_low, label %true, label %false
+
+true:
+  call i32 @zoo(i32 128)
+  ret void
+
+false:
+  %tst = icmp ugt i32 %in, 0
+  br i1 %tst, label %truer, label %falser
+
+truer:
+  call i32 @zoo(i32 42)
+  ret void
+
+falser:
+  call i32 @zoo(i32 1)
+  ret void
+}
+
+define i32 @combine_ugt_uge_sel(i64 %v, ptr %p) #0 {
+; CHECK-LABEL: combine_ugt_uge_sel:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    csel x10, x0, xzr, ne
+; CHECK-NEXT:    str x10, [x1]
+; CHECK-NEXT:    cbz w9, .LBB25_2
+; CHECK-NEXT:  // %bb.1: // %lor.lhs.false
+; CHECK-NEXT:    cmp w9, #2
+; CHECK-NEXT:    b.hs .LBB25_4
+; CHECK-NEXT:    b .LBB25_6
+; CHECK-NEXT:  .LBB25_2: // %land.lhs.true
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w10, w9
+; CHECK-NEXT:    b.ne .LBB25_4
+; CHECK-NEXT:  // %bb.3:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB25_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB25_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB25_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i32, ptr @a, align 4
+  %cmp = icmp ugt i32 %0, 0
+  %m = select i1 %cmp, i64 %v, i64 0
+  store i64 %m, ptr %p
+  br i1 %cmp, label %lor.lhs.false, label %land.lhs.true
+
+land.lhs.true:                                    ; preds = %entry
+  %1 = load i32, ptr @b, align 4
+  %2 = load i32, ptr @c, align 4
+  %cmp1 = icmp eq i32 %1, %2
+  br i1 %cmp1, label %return, label %land.lhs.true3
+
+lor.lhs.false:                                    ; preds = %entry
+  %cmp2 = icmp ugt i32 %0, 1
+  br i1 %cmp2, label %land.lhs.true3, label %if.end
+
+land.lhs.true3:                                   ; preds = %lor.lhs.false, %land.lhs.true
+  %3 = load i32, ptr @b, align 4
+  %4 = load i32, ptr @d, align 4
+  %cmp4 = icmp eq i32 %3, %4
+  br i1 %cmp4, label %return, label %if.end
+
+if.end:                                           ; preds = %land.lhs.true3, %lor.lhs.false
+  br label %return
+
+return:                                           ; preds = %if.end, %land.lhs.true3, %land.lhs.true
+  %retval.0 = phi i32 [ 0, %if.end ], [ 1, %land.lhs.true3 ], [ 1, %land.lhs.true ]
+  ret i32 %retval.0
+}
+
 declare i32 @zoo(i32)
 
 declare double @yoo(i32)

>From 4c3c334d5ea2dae707546c2a895b047e175df5a8 Mon Sep 17 00:00:00 2001
From: AZero13 <gfunni234 at gmail.com>
Date: Thu, 29 Jan 2026 09:10:59 -0500
Subject: [PATCH 2/2] Update AArch64ConditionOptimizer.cpp

---
 .../Target/AArch64/AArch64ConditionOptimizer.cpp   | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index 8b64968c558a4..0594a0f388657 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -265,14 +265,22 @@ AArch64ConditionOptimizer::adjustCmp(MachineInstr *CmpMI,
   const int OldImm = (int)CmpMI->getOperand(2).getImm();
   const int NewImm = std::abs(OldImm + Correction);
 
-  // Handle cmn 1 -> cmp 0, transitions by adjusting compare instruction opcode.
+  // Assert that we never encounter cmn 0 as input (adds with immediate 0
+  // doesn't set flags correctly and would change program semantics).
+  assert((OldImm != 0 || !Negative) && "Should not encounter cmn 0!");
+
+  // Handle cmn 1 -> cmp 0 transitions by adjusting compare instruction opcode.
+  // This happens when adjusting from "compare with -1" (cmn #1) to "compare
+  // with 0" (cmp #0) for signed comparisons.
   if (OldImm == 1 && Negative && Correction == -1) {
     // If we are adjusting from -1 to 0, we need to change the opcode.
     Opc = getComplementOpc(Opc);
   }
 
-  // Handle +0 -> -1 transitions by adjusting compare instruction opcode.
-  assert((OldImm != 0 || !Negative) && "Should not encounter cmn 0!");
+  // Handle cmp 0 -> cmn 1 transitions by adjusting compare instruction opcode.
+  // This happens when adjusting from "compare with 0" (cmp #0) to "compare with
+  // -1" (cmn #1) for signed comparisons. Note: OldImm == 0 implies !Negative
+  // due to the assert above.
   if (OldImm == 0 && Correction == -1) {
     Opc = getComplementOpc(Opc);
   }



More information about the llvm-commits mailing list