[llvm] [InstCombine] Fold `minmax (X & NegPow2C, Y & NegPow2C) -> minmax(X, Y) & NegPow2C` (PR #88859)

Yingwei Zheng via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 16 01:42:47 PDT 2024


https://github.com/dtcxzyw created https://github.com/llvm/llvm-project/pull/88859

Alive2 proof: https://alive2.llvm.org/ce/z/NFtkSX

This optimization will benefit jemalloc users.
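
As a rough illustration (not taken from jemalloc itself; the function name and the 64-byte alignment are made up for the example), this is the kind of source pattern that produces the IR in question:

  #include <algorithm>
  #include <cstdint>

  // Both operands are rounded down to a 64-byte boundary (x & -64) before
  // taking the maximum. With this fold, InstCombine should be able to emit
  // the equivalent of std::max(x, y) & ~uint64_t(63), applying the mask once.
  uint64_t aligned_max(uint64_t x, uint64_t y) {
    return std::max(x & ~uint64_t(63), y & ~uint64_t(63));
  }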


From c5030d8dcb2d59a6dcc3c7b5bb7a9ef76e6b9757 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Tue, 16 Apr 2024 16:29:58 +0800
Subject: [PATCH 1/2] [InstCombine] Add pre-commit tests. NFC.

---
 .../InstCombine/minmax-intrinsics.ll          | 93 +++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index ae2e115b1dd9a2..1acdd987729d94 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -2581,3 +2581,96 @@ entry:
   %val = call i8 @llvm.umin.i8(i8 %sub, i8 3)
   ret i8 %val
 }
+
+define i8 @test_umax_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_umax_and(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, -64
+  %y1 = and i8 %y, -64
+  %res = call i8 @llvm.umax.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_umin_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_umin_and(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, -64
+  %y1 = and i8 %y, -64
+  %res = call i8 @llvm.umin.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_smax_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smax_and(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smax.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, -64
+  %y1 = and i8 %y, -64
+  %res = call i8 @llvm.smax.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_smin_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, -64
+  %y1 = and i8 %y, -64
+  %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_smin_and_mismatch(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_mismatch(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -32
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, -64
+  %y1 = and i8 %y, -32
+  %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_smin_and_non_negated_pow2(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_non_negated_pow2(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], 31
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], 31
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, 31
+  %y1 = and i8 %y, 31
+  %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}
+
+define i8 @test_smin_and_multiuse(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_multiuse(
+; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], 31
+; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], 31
+; CHECK-NEXT:    call void @use(i8 [[Y1]])
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %x1 = and i8 %x, 31
+  %y1 = and i8 %y, 31
+  call void @use(i8 %y1)
+  %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+  ret i8 %res
+}

From 114a3adc2065ee4c114202ac0edfc29e4bfd71b2 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Tue, 16 Apr 2024 16:32:24 +0800
Subject: [PATCH 2/2] [InstCombine] Fold `minmax (X & NegPow2C, Y & NegPow2C)
 -> minmax(X, Y) & NegPow2C`

---
 .../InstCombine/InstCombineCalls.cpp          |  8 +++++-
 .../InstCombine/minmax-intrinsics.ll          | 28 ++++++++-----------
 2 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bae8579fc3650b..ba5db854647a42 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1774,6 +1774,13 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     if (Instruction *I = moveAddAfterMinMax(II, Builder))
       return I;
 
+    // minmax (X & NegPow2C, Y & NegPow2C) --> minmax(X, Y) & NegPow2C
+    const APInt *RHSC;
+    if (match(I0, m_OneUse(m_And(m_Value(X), m_NegatedPower2(RHSC)))) &&
+        match(I1, m_OneUse(m_And(m_Value(Y), m_SpecificInt(*RHSC)))))
+      return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
+                                       ConstantInt::get(II->getType(), *RHSC));
+
     // smax(X, -X) --> abs(X)
     // smin(X, -X) --> -abs(X)
     // umax(X, -X) --> -abs(X)
@@ -1815,7 +1822,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
        return NewMinMax;
 
     // Try to fold minmax with constant RHS based on range information
-    const APInt *RHSC;
     if (match(I1, m_APIntAllowUndef(RHSC))) {
       ICmpInst::Predicate Pred =
           ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index 1acdd987729d94..bd1a47bbfcc193 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -2584,10 +2584,9 @@ entry:
 
 define i8 @test_umax_and(i8 %x, i8 %y) {
 ; CHECK-LABEL: @test_umax_and(
-; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
-; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[X1]], i8 [[Y1]])
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT:    [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT:    ret i8 [[RES1]]
 ;
   %x1 = and i8 %x, -64
   %y1 = and i8 %y, -64
@@ -2597,10 +2596,9 @@ define i8 @test_umax_and(i8 %x, i8 %y) {
 
 define i8 @test_umin_and(i8 %x, i8 %y) {
 ; CHECK-LABEL: @test_umin_and(
-; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
-; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[X1]], i8 [[Y1]])
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT:    [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT:    ret i8 [[RES1]]
 ;
   %x1 = and i8 %x, -64
   %y1 = and i8 %y, -64
@@ -2610,10 +2608,9 @@ define i8 @test_umin_and(i8 %x, i8 %y) {
 
 define i8 @test_smax_and(i8 %x, i8 %y) {
 ; CHECK-LABEL: @test_smax_and(
-; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
-; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smax.i8(i8 [[X1]], i8 [[Y1]])
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smax.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT:    [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT:    ret i8 [[RES1]]
 ;
   %x1 = and i8 %x, -64
   %y1 = and i8 %y, -64
@@ -2623,10 +2620,9 @@ define i8 @test_smax_and(i8 %x, i8 %y) {
 
 define i8 @test_smin_and(i8 %x, i8 %y) {
 ; CHECK-LABEL: @test_smin_and(
-; CHECK-NEXT:    [[X1:%.*]] = and i8 [[X:%.*]], -64
-; CHECK-NEXT:    [[Y1:%.*]] = and i8 [[Y:%.*]], -64
-; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
-; CHECK-NEXT:    ret i8 [[RES]]
+; CHECK-NEXT:    [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT:    [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT:    ret i8 [[RES1]]
 ;
   %x1 = and i8 %x, -64
   %y1 = and i8 %y, -64



More information about the llvm-commits mailing list