[llvm] c0bbd0c - [InstCombine] fold not ops around min/max intrinsics

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Wed Apr 7 14:31:45 PDT 2021


Author: Sanjay Patel
Date: 2021-04-07T17:31:36-04:00
New Revision: c0bbd0cc35b91c7244b15c3fed1e0dbcf9c0df55

URL: https://github.com/llvm/llvm-project/commit/c0bbd0cc35b91c7244b15c3fed1e0dbcf9c0df55
DIFF: https://github.com/llvm/llvm-project/commit/c0bbd0cc35b91c7244b15c3fed1e0dbcf9c0df55.diff

LOG: [InstCombine] fold not ops around min/max intrinsics

This is another step towards parity with the existing
cmp+select folds (see D98152).
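
For illustration, here is a minimal IR sketch of the new fold, distilled
from the test updates below (the @src/@tgt names exist only for this
example):

    declare i8 @llvm.smax.i8(i8, i8)
    declare i8 @llvm.smin.i8(i8, i8)

    ; ~smax(~x, ~y) --> smin(x, y)
    define i8 @src(i8 %x, i8 %y) {
      %notx = xor i8 %x, -1
      %noty = xor i8 %y, -1
      %m = call i8 @llvm.smax.i8(i8 %notx, i8 %noty)
      %notm = xor i8 %m, -1
      ret i8 %notm
    }

    ; after this patch, instcombine reduces @src to the equivalent of:
    define i8 @tgt(i8 %x, i8 %y) {
      %m = call i8 @llvm.smin.i8(i8 %x, i8 %y)
      ret i8 %m
    }

The one-use variant moves a single 'not' to the other operand instead:
~max(~X, Y) --> min(X, ~Y).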

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/test/Transforms/InstCombine/minmax-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8e9d4f7d12ec..6bb0627c40a8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3446,6 +3446,25 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
     return CastedXor;
 
+  // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
+  // ~min(~X, ~Y) --> max(X, Y)
+  // ~max(~X, Y) --> min(X, ~Y)
+  auto *II = dyn_cast<IntrinsicInst>(Op0);
+  if (II && match(Op1, m_AllOnes())) {
+    if (match(Op0, m_MaxOrMin(m_Not(m_Value(X)), m_Not(m_Value(Y))))) {
+      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
+      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
+      return replaceInstUsesWith(I, InvMaxMin);
+    }
+    if (match(Op0, m_OneUse(m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y))))) {
+      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
+      Value *NotY = Builder.CreateNot(Y);
+      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
+      return replaceInstUsesWith(I, InvMaxMin);
+    }
+  }
+
+  // TODO: Remove folds if we canonicalize to intrinsics (see above).
   // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
   //
   //   %notx = xor i32 %x, -1

diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index 098519cfa794..c43c4aa044cf 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -526,9 +526,8 @@ define i8 @not_smax_of_nots(i8 %x, i8 %y) {
 ; CHECK-NEXT:    call void @use(i8 [[NOTX]])
 ; CHECK-NEXT:    [[NOTY:%.*]] = xor i8 [[Y:%.*]], -1
 ; CHECK-NEXT:    call void @use(i8 [[NOTY]])
-; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.smax.i8(i8 [[NOTX]], i8 [[NOTY]])
-; CHECK-NEXT:    [[NOTM:%.*]] = xor i8 [[M]], -1
-; CHECK-NEXT:    ret i8 [[NOTM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smin.i8(i8 [[X]], i8 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %notx = xor i8 %x, -1
   call void @use(i8 %notx)
@@ -547,8 +546,8 @@ define i8 @not_smin_of_nots(i8 %x, i8 %y) {
 ; CHECK-NEXT:    call void @use(i8 [[NOTY]])
 ; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.smin.i8(i8 [[NOTX]], i8 [[NOTY]])
 ; CHECK-NEXT:    call void @use(i8 [[M]])
-; CHECK-NEXT:    [[NOTM:%.*]] = xor i8 [[M]], -1
-; CHECK-NEXT:    ret i8 [[NOTM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.smax.i8(i8 [[X]], i8 [[Y]])
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %notx = xor i8 %x, -1
   call void @use(i8 %notx)
@@ -564,9 +563,9 @@ define i8 @not_umax_of_not(i8 %x, i8 %y) {
 ; CHECK-LABEL: @not_umax_of_not(
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use(i8 [[NOTX]])
-; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[NOTX]], i8 [[Y:%.*]])
-; CHECK-NEXT:    [[NOTM:%.*]] = xor i8 [[M]], -1
-; CHECK-NEXT:    ret i8 [[NOTM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.umin.i8(i8 [[X]], i8 [[TMP1]])
+; CHECK-NEXT:    ret i8 [[TMP2]]
 ;
   %notx = xor i8 %x, -1
   call void @use(i8 %notx)
@@ -575,6 +574,8 @@ define i8 @not_umax_of_not(i8 %x, i8 %y) {
   ret i8 %notm
 }
 
+; Negative test - this would require an extra instruction.
+
 define i8 @not_umin_of_not(i8 %x, i8 %y) {
 ; CHECK-LABEL: @not_umin_of_not(
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
@@ -596,9 +597,8 @@ define i8 @not_umin_of_not_constant_op(i8 %x) {
 ; CHECK-LABEL: @not_umin_of_not_constant_op(
 ; CHECK-NEXT:    [[NOTX:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use(i8 [[NOTX]])
-; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umin.i8(i8 [[NOTX]], i8 42)
-; CHECK-NEXT:    [[NOTM:%.*]] = xor i8 [[M]], -1
-; CHECK-NEXT:    ret i8 [[NOTM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[X]], i8 -43)
+; CHECK-NEXT:    ret i8 [[TMP1]]
 ;
   %notx = xor i8 %x, -1
   call void @use(i8 %notx)


        

