[llvm] 8c664a9 - [InstCombine] Fold negation of calls to `ucmp/scmp` by swapping their operands (#98360)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 10 12:51:58 PDT 2024


Author: Poseydon42
Date: 2024-07-10T21:51:55+02:00
New Revision: 8c664a9f507fb36aaec995a895178d88566ad58f

URL: https://github.com/llvm/llvm-project/commit/8c664a9f507fb36aaec995a895178d88566ad58f
DIFF: https://github.com/llvm/llvm-project/commit/8c664a9f507fb36aaec995a895178d88566ad58f.diff

LOG: [InstCombine] Fold negation of calls to `ucmp/scmp` by swapping their operands (#98360)

Proofs: https://alive2.llvm.org/ce/z/cp_a36
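
The fold is sound because `llvm.scmp`/`llvm.ucmp` only ever produce -1, 0, or +1, so negating the result is the same as evaluating the compare with its operands swapped. A minimal standalone C++ sketch of that identity (illustrative only, not part of the patch; `scmp_ref` is a hypothetical reference model of `llvm.scmp.i8.i32`, not an LLVM API):

#include <cassert>
#include <cstdint>

// Reference model of llvm.scmp.i8.i32: returns -1, 0, or +1.
static int8_t scmp_ref(int32_t X, int32_t Y) {
  return static_cast<int8_t>((X > Y) - (X < Y));
}

int main() {
  const int32_t Cases[][2] = {{1, 2}, {2, 1}, {3, 3}, {-5, 7}};
  for (const auto &C : Cases)
    assert(-scmp_ref(C[0], C[1]) == scmp_ref(C[1], C[0]));
  return 0;
}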

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
    llvm/test/Transforms/InstCombine/scmp.ll
    llvm/test/Transforms/InstCombine/ucmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index b3426562a4d87..e4895b59f4b4a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -222,6 +222,11 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     }
     break;
   }
+  case Instruction::Call:
+    if (auto *CI = dyn_cast<CmpIntrinsic>(I); CI && CI->hasOneUse())
+      return Builder.CreateIntrinsic(CI->getType(), CI->getIntrinsicID(),
+                                     {CI->getRHS(), CI->getLHS()});
+    break;
   default:
     break; // Other instructions require recursive reasoning.
   }

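In short, the new `Instruction::Call` case lets the Negator treat a one-use `llvm.scmp`/`llvm.ucmp` call as freely negatable by re-emitting the same intrinsic with its arguments swapped. A simplified standalone sketch of that logic, reusing the accessors the patch itself calls (the helper name `negateCmpIntrinsic` is illustrative, not an LLVM API):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Return a replacement for -I when I is a one-use llvm.scmp/llvm.ucmp call,
// or nullptr if the fold does not apply.
static Value *negateCmpIntrinsic(Instruction *I, IRBuilder<> &Builder) {
  auto *CI = dyn_cast<CmpIntrinsic>(I); // covers both llvm.scmp and llvm.ucmp
  if (!CI || !CI->hasOneUse())
    return nullptr; // with extra uses the original call would stay around
  // -scmp(x, y) == scmp(y, x), and likewise for ucmp, so swap the operands.
  return Builder.CreateIntrinsic(CI->getType(), CI->getIntrinsicID(),
                                 {CI->getRHS(), CI->getLHS()});
}
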
diff --git a/llvm/test/Transforms/InstCombine/scmp.ll b/llvm/test/Transforms/InstCombine/scmp.ll
index 4f903a79afd5d..2523872562cad 100644
--- a/llvm/test/Transforms/InstCombine/scmp.ll
+++ b/llvm/test/Transforms/InstCombine/scmp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
+declare void @use(i8 %value)
+
 define i1 @scmp_eq_0(i32 %x, i32 %y) {
 ; CHECK-LABEL: define i1 @scmp_eq_0(
 ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
@@ -154,3 +156,30 @@ define i1 @scmp_sle_neg_1(i32 %x, i32 %y) {
   %2 = icmp sle i8 %1, -1
   ret i1 %2
 }
+
+; ========== Fold -scmp(x, y) => scmp(y, x) ==========
+define i8 @scmp_negated(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_negated(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  %2 = sub i8 0, %1
+  ret i8 %2
+}
+
+; Negative test: do not fold if the original scmp result is already used
+define i8 @scmp_negated_multiuse(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_negated_multiuse(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.scmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    call void @use(i8 [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = sub nsw i8 0, [[TMP1]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  call void @use(i8 %1)
+  %2 = sub i8 0, %1
+  ret i8 %2
+}

diff --git a/llvm/test/Transforms/InstCombine/ucmp.ll b/llvm/test/Transforms/InstCombine/ucmp.ll
index 9ab67560c9117..7210455094baa 100644
--- a/llvm/test/Transforms/InstCombine/ucmp.ll
+++ b/llvm/test/Transforms/InstCombine/ucmp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
+declare void @use(i8 %value)
+
 define i1 @ucmp_eq_0(i32 %x, i32 %y) {
 ; CHECK-LABEL: define i1 @ucmp_eq_0(
 ; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
@@ -154,3 +156,30 @@ define i1 @ucmp_sle_neg_1(i32 %x, i32 %y) {
   %2 = icmp sle i8 %1, -1
   ret i1 %2
 }
+
+; ========== Fold -ucmp(x, y) => ucmp(y, x) ==========
+define i8 @ucmp_negated(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_negated(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[Y]], i32 [[X]])
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  %2 = sub i8 0, %1
+  ret i8 %2
+}
+
+; Negative test: do not fold if the original ucmp result is already used
+define i8 @ucmp_negated_multiuse(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_negated_multiuse(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    call void @use(i8 [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = sub nsw i8 0, [[TMP1]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  call void @use(i8 %1)
+  %2 = sub i8 0, %1
+  ret i8 %2
+}
