[llvm] b7c0f79 - [InstCombine] Replace `isFreeToInvert` + `CreateNot` with `getFreelyInverted`

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 20 15:59:54 PST 2023


Author: Noah Goldstein
Date: 2023-11-20T17:59:27-06:00
New Revision: b7c0f79926ad0e4f4abe193c82dad8468855eb81

URL: https://github.com/llvm/llvm-project/commit/b7c0f79926ad0e4f4abe193c82dad8468855eb81
DIFF: https://github.com/llvm/llvm-project/commit/b7c0f79926ad0e4f4abe193c82dad8468855eb81.diff

LOG: [InstCombine] Replace `isFreeToInvert` + `CreateNot` with `getFreelyInverted`

This is nearly an NFC; the only changes are potentially the order in which
values are created and the resulting value names.

Otherwise it is a slight speedup/simplification, since it avoids having to
run the `getFreelyInverted` recursive logic a second time just to simplify
away the extra `not` op.
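
To illustrate, here is a minimal sketch of the before/after pattern this
change applies (the operands X and Y are placeholders; the real call sites
are in the files listed below):

  // Before: check invertibility, then emit an explicit `not` that a later
  // fold must walk the same recursive logic again to remove.
  if (isFreeToInvert(X, X->hasOneUse())) {
    Value *NotX = Builder.CreateNot(X);
    return BinaryOperator::CreateAnd(Y, NotX);
  }

  // After: materialize the inverted value directly; getFreelyInverted
  // returns nullptr when the inversion is not actually free.
  if (Value *NotX = getFreelyInverted(X, X->hasOneUse(), &Builder))
    return BinaryOperator::CreateAnd(Y, NotX);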

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll
    llvm/test/Transforms/InstCombine/free-inversion.ll
    llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
    llvm/test/Transforms/InstCombine/minmax-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index f2ae0cb85d94c36..9b115cf32183291 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -2229,8 +2229,10 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
     if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
         isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
         (ConsumesOp0 || ConsumesOp1)) {
-      Value *NotOp0 = Builder.CreateNot(Op0);
-      Value *NotOp1 = Builder.CreateNot(Op1);
+      Value *NotOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
+      Value *NotOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
+      assert(NotOp0 != nullptr && NotOp1 != nullptr &&
+             "isFreeToInvert desynced with getFreelyInverted");
       return BinaryOperator::CreateSub(NotOp1, NotOp0);
     }
   }

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index cc2fb7dfe1565f3..02881109f17d29f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2503,16 +2503,24 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
       return BinaryOperator::CreateAnd(Op1, B);
 
     // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
-    if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
-      if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
-        if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
-          return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
+    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
+        match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
+      Value *NotC = Op1->hasOneUse()
+                        ? Builder.CreateNot(C)
+                        : getFreelyInverted(C, C->hasOneUse(), &Builder);
+      if (NotC != nullptr)
+        return BinaryOperator::CreateAnd(Op0, NotC);
+    }
 
     // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
-    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
-      if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
-        if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
-          return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
+    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
+        match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
+      Value *NotC = Op0->hasOneUse()
+                        ? Builder.CreateNot(C)
+                        : getFreelyInverted(C, C->hasOneUse(), &Builder);
+      if (NotC != nullptr)
+        return BinaryOperator::CreateAnd(Op1, NotC);
+    }
 
     // (A | B) & (~A ^ B) -> A & B
     // (A | B) & (B ^ ~A) -> A & B
@@ -4047,26 +4055,6 @@ static Instruction *visitMaskedMerge(BinaryOperator &I,
   return nullptr;
 }
 
-// Transform
-//   ~(x ^ y)
-// into:
-//   (~x) ^ y
-// or into
-//   x ^ (~y)
-static Instruction *sinkNotIntoXor(BinaryOperator &I, Value *X, Value *Y,
-                                   InstCombiner::BuilderTy &Builder) {
-  // We only want to do the transform if it is free to do.
-  if (InstCombiner::isFreeToInvert(X, X->hasOneUse())) {
-    // Ok, good.
-  } else if (InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) {
-    std::swap(X, Y);
-  } else
-    return nullptr;
-
-  Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
-  return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
-}
-
 static Instruction *foldNotXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
   Value *X, *Y;
@@ -4075,9 +4063,6 @@ static Instruction *foldNotXor(BinaryOperator &I,
   if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
     return nullptr;
 
-  if (Instruction *NewXor = sinkNotIntoXor(I, X, Y, Builder))
-    return NewXor;
-
   auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
     return A == C || A == D || B == C || B == D;
   };
@@ -4375,15 +4360,6 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
   // ~max(~X, Y) --> min(X, ~Y)
   auto *II = dyn_cast<IntrinsicInst>(NotOp);
   if (II && II->hasOneUse()) {
-    if (match(NotOp, m_MaxOrMin(m_Value(X), m_Value(Y))) &&
-        isFreeToInvert(X, X->hasOneUse()) &&
-        isFreeToInvert(Y, Y->hasOneUse())) {
-      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
-      Value *NotX = Builder.CreateNot(X);
-      Value *NotY = Builder.CreateNot(Y);
-      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY);
-      return replaceInstUsesWith(I, InvMaxMin);
-    }
     if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
       Value *NotY = Builder.CreateNot(Y);

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 64cbfebf0102876..f8346cd03849acf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1698,12 +1698,12 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
       Value *A;
       if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
-          !isFreeToInvert(A, A->hasOneUse()) &&
-          isFreeToInvert(Y, Y->hasOneUse())) {
-        Value *NotY = Builder.CreateNot(Y);
-        Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
-        Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
-        return BinaryOperator::CreateNot(InvMaxMin);
+          !isFreeToInvert(A, A->hasOneUse())) {
+        if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) {
+          Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
+          Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
+          return BinaryOperator::CreateNot(InvMaxMin);
+        }
       }
       return nullptr;
     };

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 783668db2da7670..b6e59f707e78268 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3281,10 +3281,12 @@ Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
   // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
   // Example: are all elements equal? --> are zero elements not equal?
   // TODO: Try harder to reduce compare of 2 freely invertible operands?
-  if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse() &&
-      isFreeToInvert(BCSrcOp, BCSrcOp->hasOneUse())) {
-    Value *Cast = Builder.CreateBitCast(Builder.CreateNot(BCSrcOp), DstType);
-    return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
+  if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
+    if (Value *NotBCSrcOp =
+            getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
+      Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
+      return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
+    }
   }
 
   // If this is checking if all elements of an extended vector are clear or not,
@@ -4549,14 +4551,13 @@ static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
 
   if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
     // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
-    if (IC.isFreeToInvert(Op1, Op1->hasOneUse()))
-      return new ICmpInst(Pred,
-                          IC.Builder.CreateAnd(A, IC.Builder.CreateNot(Op1)),
+    if (Value *NotOp1 =
+            IC.getFreelyInverted(Op1, Op1->hasOneUse(), &IC.Builder))
+      return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
                           Constant::getNullValue(Op1->getType()));
     // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X  is freely invertible.
-    if (IC.isFreeToInvert(A, A->hasOneUse()))
-      return new ICmpInst(Pred,
-                          IC.Builder.CreateOr(Op1, IC.Builder.CreateNot(A)),
+    if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
+      return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
                           Constant::getAllOnesValue(Op1->getType()));
   }
   return nullptr;

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 71c2d68881441ac..2dda46986f0fd08 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -3075,18 +3075,18 @@ Instruction *InstCombinerImpl::foldSelectOfBools(SelectInst &SI) {
     return SelectInst::Create(TrueVal, OrV, Zero);
   }
   // select (c & b), a, b -> select b, (select ~c, true, a), false
-  if (match(CondVal, m_OneUse(m_c_And(m_Value(C), m_Specific(FalseVal)))) &&
-      isFreeToInvert(C, C->hasOneUse())) {
-    Value *NotC = Builder.CreateNot(C);
-    Value *OrV = Builder.CreateSelect(NotC, One, TrueVal);
-    return SelectInst::Create(FalseVal, OrV, Zero);
+  if (match(CondVal, m_OneUse(m_c_And(m_Value(C), m_Specific(FalseVal))))) {
+    if (Value *NotC = getFreelyInverted(C, C->hasOneUse(), &Builder)) {
+      Value *OrV = Builder.CreateSelect(NotC, One, TrueVal);
+      return SelectInst::Create(FalseVal, OrV, Zero);
+    }
   }
   // select (a | c), a, b -> select a, true, (select ~c, b, false)
-  if (match(CondVal, m_OneUse(m_c_Or(m_Specific(TrueVal), m_Value(C)))) &&
-      isFreeToInvert(C, C->hasOneUse())) {
-    Value *NotC = Builder.CreateNot(C);
-    Value *AndV = Builder.CreateSelect(NotC, FalseVal, Zero);
-    return SelectInst::Create(TrueVal, One, AndV);
+  if (match(CondVal, m_OneUse(m_c_Or(m_Specific(TrueVal), m_Value(C))))) {
+    if (Value *NotC = getFreelyInverted(C, C->hasOneUse(), &Builder)) {
+      Value *AndV = Builder.CreateSelect(NotC, FalseVal, Zero);
+      return SelectInst::Create(TrueVal, One, AndV);
+    }
   }
   // select (c & ~b), a, b -> select b, true, (select c, a, false)
   if (match(CondVal,

diff --git a/llvm/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll b/llvm/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll
index 0ea645561696a50..b89cbefb0a70340 100644
--- a/llvm/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll
+++ b/llvm/test/Transforms/InstCombine/demorgan-sink-not-into-xor.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --prefix-filecheck-ir-name V
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
 ; https://bugs.llvm.org/show_bug.cgi?id=38446
@@ -22,10 +22,10 @@ declare i1 @gen1()
 
 define i1 @positive_easyinvert(i16 %x, i8 %y) {
 ; CHECK-LABEL: @positive_easyinvert(
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i16 [[X:%.*]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i16 [[X:%.*]], -1
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[VTMP2]], [[TMP1]]
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = icmp slt i16 %x, 0
   %tmp2 = icmp slt i8 %y, 0
@@ -36,24 +36,24 @@ define i1 @positive_easyinvert(i16 %x, i8 %y) {
 
 define i1 @positive_easyinvert0(i8 %y) {
 ; CHECK-LABEL: @positive_easyinvert0(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @gen1()
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = call i1 @gen1()
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[TMP1]], [[VTMP1]]
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = call i1 @gen1()
-  %tmp2 = icmp slt i8 %y, 0
-  %tmp3 = xor i1 %tmp2, %tmp1
+  %cond = icmp slt i8 %y, 0
+  %tmp3 = xor i1 %cond, %tmp1
   %tmp4 = xor i1 %tmp3, true
   ret i1 %tmp4
 }
 
 define i1 @positive_easyinvert1(i8 %y) {
 ; CHECK-LABEL: @positive_easyinvert1(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @gen1()
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i8 [[Y:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP2]], [[TMP1]]
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = call i1 @gen1()
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[VTMP1]], [[TMP1]]
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = call i1 @gen1()
   %tmp2 = icmp slt i8 %y, 0
@@ -70,12 +70,12 @@ declare void @use1(i1)
 
 define i1 @oneuse_easyinvert_0(i8 %y) {
 ; CHECK-LABEL: @oneuse_easyinvert_0(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @gen1()
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
-; CHECK-NEXT:    call void @use1(i1 [[TMP2]])
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = call i1 @gen1()
+; CHECK-NEXT:    [[VTMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @use1(i1 [[VTMP2]])
+; CHECK-NEXT:    [[VTMP3:%.*]] = xor i1 [[VTMP1]], [[VTMP2]]
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[VTMP3]], true
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = call i1 @gen1()
   %tmp2 = icmp slt i8 %y, 0
@@ -87,12 +87,12 @@ define i1 @oneuse_easyinvert_0(i8 %y) {
 
 define i1 @oneuse_easyinvert_1(i8 %y) {
 ; CHECK-LABEL: @oneuse_easyinvert_1(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @gen1()
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    call void @use1(i1 [[TMP3]])
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = call i1 @gen1()
+; CHECK-NEXT:    [[VTMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
+; CHECK-NEXT:    [[VTMP3:%.*]] = xor i1 [[VTMP1]], [[VTMP2]]
+; CHECK-NEXT:    call void @use1(i1 [[VTMP3]])
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[VTMP3]], true
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = call i1 @gen1()
   %tmp2 = icmp slt i8 %y, 0
@@ -104,13 +104,13 @@ define i1 @oneuse_easyinvert_1(i8 %y) {
 
 define i1 @oneuse_easyinvert_2(i8 %y) {
 ; CHECK-LABEL: @oneuse_easyinvert_2(
-; CHECK-NEXT:    [[TMP1:%.*]] = call i1 @gen1()
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
-; CHECK-NEXT:    call void @use1(i1 [[TMP2]])
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    call void @use1(i1 [[TMP3]])
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
-; CHECK-NEXT:    ret i1 [[TMP4]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = call i1 @gen1()
+; CHECK-NEXT:    [[VTMP2:%.*]] = icmp slt i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @use1(i1 [[VTMP2]])
+; CHECK-NEXT:    [[VTMP3:%.*]] = xor i1 [[VTMP1]], [[VTMP2]]
+; CHECK-NEXT:    call void @use1(i1 [[VTMP3]])
+; CHECK-NEXT:    [[VTMP4:%.*]] = xor i1 [[VTMP3]], true
+; CHECK-NEXT:    ret i1 [[VTMP4]]
 ;
   %tmp1 = call i1 @gen1()
   %tmp2 = icmp slt i8 %y, 0
@@ -128,9 +128,9 @@ define i1 @oneuse_easyinvert_2(i8 %y) {
 ; Not easily invertible.
 define i32 @negative(i32 %x, i32 %y) {
 ; CHECK-LABEL: @negative(
-; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT:    ret i32 [[TMP2]]
+; CHECK-NEXT:    [[VTMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[VTMP2:%.*]] = xor i32 [[VTMP1]], -1
+; CHECK-NEXT:    ret i32 [[VTMP2]]
 ;
   %tmp1 = xor i32 %x, %y
   %tmp2 = xor i32 %tmp1, -1

diff --git a/llvm/test/Transforms/InstCombine/free-inversion.ll b/llvm/test/Transforms/InstCombine/free-inversion.ll
index d3b6a5801b53b6a..c6fe6a21cb91759 100644
--- a/llvm/test/Transforms/InstCombine/free-inversion.ll
+++ b/llvm/test/Transforms/InstCombine/free-inversion.ll
@@ -11,8 +11,8 @@ declare void @use.i8(i8)
 define i8 @xor_1(i8 %a, i1 %c, i8 %x, i8 %y) {
 ; CHECK-LABEL: @xor_1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
-; CHECK-NEXT:    [[B_NOT:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
-; CHECK-NEXT:    [[NOT_BA:%.*]] = xor i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
+; CHECK-NEXT:    [[NOT_BA:%.*]] = xor i8 [[TMP2]], [[A:%.*]]
 ; CHECK-NEXT:    ret i8 [[NOT_BA]]
 ;
   %nx = xor i8 %x, -1
@@ -26,8 +26,8 @@ define i8 @xor_1(i8 %a, i1 %c, i8 %x, i8 %y) {
 define i8 @xor_2(i8 %a, i1 %c, i8 %x, i8 %y) {
 ; CHECK-LABEL: @xor_2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124
-; CHECK-NEXT:    [[B_NOT:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
-; CHECK-NEXT:    [[NOT_AB:%.*]] = xor i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]]
+; CHECK-NEXT:    [[NOT_AB:%.*]] = xor i8 [[TMP2]], [[A:%.*]]
 ; CHECK-NEXT:    ret i8 [[NOT_AB]]
 ;
   %nx = xor i8 %x, -1

diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index 74b8bfab82ed48e..095ac5b27f59635 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -131,7 +131,7 @@ define i1 @PR46561(i1 %a, i1 %x, i1 %y, i8 %z) {
 ; CHECK-NEXT:    [[MULBOOL:%.*]] = and i1 [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    [[TMP0:%.*]] = and i8 [[Z:%.*]], 1
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 [[TMP0]], 0
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[TMP1]], [[MULBOOL]]
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[MULBOOL]], [[TMP1]]
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[P:%.*]] = phi i1 [ [[TMP2]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]

diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index 6df9bb06ca56d2e..4ff5c714bcd4516 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -1263,7 +1263,7 @@ define i8 @freeToInvert(i8 %x, i8 %y, i8 %z) {
 ; CHECK-NEXT:    call void @use(i8 [[NY]])
 ; CHECK-NEXT:    call void @use(i8 [[NZ]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[X]], i8 [[Y]])
-; CHECK-NEXT:    [[NOT:%.*]] = call i8 @llvm.smax.i8(i8 [[TMP1]], i8 [[Z]])
+; CHECK-NEXT:    [[NOT:%.*]] = call i8 @llvm.smax.i8(i8 [[Z]], i8 [[TMP1]])
 ; CHECK-NEXT:    ret i8 [[NOT]]
 ;
   %nx = xor i8 %x, -1


        

