[llvm] [InstCombine] Add Missed Optimization (PR #141962)

Bobby SONG via llvm-commits llvm-commits at lists.llvm.org
Fri May 30 06:53:56 PDT 2025


https://github.com/bobby-b-song updated https://github.com/llvm/llvm-project/pull/141962

>From 7d6888eda95f1e2715a3d77a58972c730465f4c2 Mon Sep 17 00:00:00 2001
From: Bobby Song <me at bby.ee>
Date: Thu, 29 May 2025 16:09:45 +0100
Subject: [PATCH 1/2] Add test case for issue 141479

---
 .../and-comparison-not-always-false.ll        | 21 +++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll

diff --git a/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll b/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll
new file mode 100644
index 0000000000000..174d97d30bcf8
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+define i1 @test(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP0]], -1
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP0]], [[TMP4]]
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = and i1 [[TMP3]], [[TMP5]]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+;
+common.ret:
+  %2 = xor i32 %0, -1
+  %3 = icmp ule i32 %1, %2
+  %4 = xor i32 %1, -1
+  %5 = icmp ugt i32 %0, %4
+  %common.ret.op = and i1 %3, %5
+  ret i1 %common.ret.op
+}

>From b11691e95f5984138f8ce3ab18280c5db58b1a18 Mon Sep 17 00:00:00 2001
From: Bobby Song <me at bby.ee>
Date: Thu, 29 May 2025 18:52:27 +0100
Subject: [PATCH 2/2] Add missed optimization; further adaptation still needed

Add test for optimization

Extend the fold to signed integers and to more inclusive/exclusive range combinations

Update test for optimization
---
 llvm/lib/Analysis/InstructionSimplify.cpp     |  14 ++
 .../and-comparison-not-always-false.ll        | 125 +++++++++++++++++-
 2 files changed, 135 insertions(+), 4 deletions(-)
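
For context, the source-level shape this fold targets looks roughly like the
snippet below. It is a hypothetical illustration only (the function name and
the use of unsigned int are mine, not code taken from issue #141479):

    // never_true() always returns false: for unsigned values, x <= ~y says
    // that x + y does not wrap past UINT_MAX, while y > ~x says that it
    // does, so the two conditions can never hold at the same time.
    bool never_true(unsigned x, unsigned y) {
      return x <= ~y && y > ~x;
    }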

diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 23e147ba8c6a1..e626d91a81f0b 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2178,6 +2178,20 @@ static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
     return Constant::getNullValue(Op0->getType());
 
+  // (X <= ~Y) && (Y > ~X) --> false (likewise for the strict variants)
+  CmpPredicate Pred0, Pred1;
+  if (match(Op0,
+            m_c_ICmp(Pred0, m_Value(X), m_c_Xor(m_Value(Y), m_AllOnes()))) &&
+      match(Op1, m_c_ICmp(Pred1, m_Specific(Y),
+                          m_c_Xor(m_Specific(X), m_AllOnes())))) {
+    if (ICmpInst::isLE(Pred0) && ICmpInst::isGT(Pred1))
+      return ConstantInt::getFalse(Op0->getType());
+    if (ICmpInst::isLT(Pred0) && ICmpInst::isGE(Pred1))
+      return ConstantInt::getFalse(Op0->getType());
+    if (ICmpInst::isLT(Pred0) && ICmpInst::isGT(Pred1))
+      return ConstantInt::getFalse(Op0->getType());
+  }
+
   if (Op0->getType()->isIntOrIntVectorTy(1)) {
     if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
       // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
diff --git a/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll b/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll
index 174d97d30bcf8..0ed39decf578c 100644
--- a/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll
+++ b/llvm/test/Transforms/InstCombine/and-comparison-not-always-false.ll
@@ -1,13 +1,73 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-define i1 @test(i32 %0, i32 %1) {
-; CHECK-LABEL: define i1 @test(
+define i1 @test_pass_et(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test_pass_et(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    ret i1 false
+;
+common.ret:
+  %2 = xor i32 %0, -1
+  %3 = icmp ule i32 %1, %2
+  %4 = xor i32 %1, -1
+  %5 = icmp ugt i32 %0, %4
+  %common.ret.op = and i1 %3, %5
+  ret i1 %common.ret.op
+}
+
+define i1 @test_pass_signed(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test_pass_signed(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    ret i1 false
+;
+common.ret:
+  %2 = xor i32 %0, -1
+  %3 = icmp sle i32 %1, %2
+  %4 = xor i32 %1, -1
+  %5 = icmp sgt i32 %0, %4
+  %common.ret.op = and i1 %3, %5
+  ret i1 %common.ret.op
+}
+
+define i1 @test_pass_tt(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test_pass_tt(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    ret i1 false
+;
+common.ret:
+  %2 = xor i32 %0, -1
+  %3 = icmp ult i32 %1, %2
+  %4 = xor i32 %1, -1
+  %5 = icmp ugt i32 %0, %4
+  %common.ret.op = and i1 %3, %5
+  ret i1 %common.ret.op
+}
+
+define i1 @test_pass_te(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test_pass_te(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    ret i1 false
+;
+common.ret:
+  %2 = xor i32 %0, -1
+  %3 = icmp ult i32 %1, %2
+  %4 = xor i32 %1, -1
+  %5 = icmp uge i32 %0, %4
+  %common.ret.op = and i1 %3, %5
+  ret i1 %common.ret.op
+}
+
+define i1 @test_nopass_ee(i32 %0, i32 %1) {
+; CHECK-LABEL: define i1 @test_nopass_ee(
 ; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) {
 ; CHECK-NEXT:  [[COMMON_RET:.*:]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP0]], -1
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP0]], [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i32 [[TMP0]], [[TMP4]]
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = and i1 [[TMP3]], [[TMP5]]
 ; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
 ;
@@ -15,7 +75,64 @@ common.ret:
   %2 = xor i32 %0, -1
   %3 = icmp ule i32 %1, %2
   %4 = xor i32 %1, -1
-  %5 = icmp ugt i32 %0, %4
+  %5 = icmp uge i32 %0, %4
   %common.ret.op = and i1 %3, %5
   ret i1 %common.ret.op
 }
+
+define i1 @test_no_change_et(i32 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: define i1 @test_no_change_et(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[TMP2:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = and i1 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+;
+common.ret:
+  %3 = xor i32 %0, -1
+  %4 = icmp ule i32 %1, %3
+  %5 = xor i32 %1, -1
+  %6 = icmp ugt i32 %1, %5
+  %common.ret.op = and i1 %6, %4
+  ret i1 %common.ret.op
+}
+
+define i1 @test_no_change_te(i32 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: define i1 @test_no_change_te(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[TMP2:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = and i1 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+;
+common.ret:
+  %3 = xor i32 %0, -1
+  %4 = icmp ult i32 %1, %3
+  %5 = xor i32 %1, -1
+  %6 = icmp uge i32 %1, %5
+  %common.ret.op = and i1 %6, %4
+  ret i1 %common.ret.op
+}
+
+define i1 @test_no_change_tt(i32 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: define i1 @test_no_change_tt(
+; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[TMP2:%.*]]) {
+; CHECK-NEXT:  [[COMMON_RET:.*:]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = and i1 [[TMP5]], [[TMP4]]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+;
+common.ret:
+  %3 = xor i32 %0, -1
+  %4 = icmp ult i32 %1, %3
+  %5 = xor i32 %1, -1
+  %6 = icmp ugt i32 %1, %5
+  %common.ret.op = and i1 %6, %4
+  ret i1 %common.ret.op
+}
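
For reference, a quick standalone sketch (not part of the patch) can
exhaustively verify, for every pair of 8-bit unsigned values, that the three
predicate combinations folded in simplifyAndInst are contradictory. The
harness below, including the choice of uint8_t, is illustrative only:

    // Exhaustively check that (X <= ~Y) && (Y > ~X) and its strict variants
    // are always false for 8-bit unsigned values. The signed case follows
    // the same argument, since ~V == -1 - V in two's complement.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned x = 0; x <= 0xFF; ++x) {
        for (unsigned y = 0; y <= 0xFF; ++y) {
          uint8_t X = x, Y = y;
          uint8_t NotX = ~X, NotY = ~Y; // complements, truncated to 8 bits
          assert(!(X <= NotY && Y >  NotX)); // le / gt
          assert(!(X <  NotY && Y >= NotX)); // lt / ge
          assert(!(X <  NotY && Y >  NotX)); // lt / gt
        }
      }
      return 0;
    }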


