[llvm-branch-commits] [llvm] d9ebaee - [InstCombine] Hoist xor-by-constant from xor-by-value

Roman Lebedev via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Dec 24 10:27:37 PST 2020


Author: Roman Lebedev
Date: 2020-12-24T21:20:50+03:00
New Revision: d9ebaeeb468d6a8f29eb479f18d2790f7efb8565

URL: https://github.com/llvm/llvm-project/commit/d9ebaeeb468d6a8f29eb479f18d2790f7efb8565
DIFF: https://github.com/llvm/llvm-project/commit/d9ebaeeb468d6a8f29eb479f18d2790f7efb8565.diff

LOG: [InstCombine] Hoist xor-by-constant from xor-by-value

This is one of the deficiencies that can be observed in
https://godbolt.org/z/YPczsG after D91038 patch set.

This exposed two missing folds; one was fixed by the previous commit,
and the other is `(A ^ B) | ~(A ^ B) --> -1` / `(A ^ B) & ~(A ^ B) --> 0`.

`-early-cse` will catch it: https://godbolt.org/z/4n1T1v,
but it isn't meaningful to fix this in InstCombine,
because we'd essentially need to do our own CSE,
and we can't even rely on `Instruction::isIdenticalTo()`,
because there is no guarantee that the order of operands matches.
So let's just accept it as a loss.

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
    llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
    llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
    llvm/test/Transforms/InstCombine/or-xor.ll
    llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll
    llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll
    llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
    llvm/test/Transforms/InstCombine/vec_shuffle.ll
    llvm/test/Transforms/InstCombine/xor2.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3c0cdaf6e843..9d3b81a1cdd5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3457,5 +3457,13 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
   if (Instruction *NewXor = sinkNotIntoXor(I, Builder))
     return NewXor;
 
+  // Otherwise, if all else failed, try to hoist the xor-by-constant:
+  // (X ^ C) ^ Y --> (X ^ Y) ^ C
+  // FIXME: does this need hardening against ConstantExpr's
+  //        to prevent infinite combine loops?
+  if (match(&I,
+            m_c_Xor(m_OneUse(m_Xor(m_Value(X), m_Constant(C1))), m_Value(Y))))
+    return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
+
   return nullptr;
 }

diff --git a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
index 20b114ff0b0c..e5ffb4622cbc 100644
--- a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
+++ b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll
@@ -8,8 +8,8 @@ declare void @use8(i8)
 
 define i8 @t0_scalar(i8 %x, i8 %y) {
 ; CHECK-LABEL: @t0_scalar(
-; CHECK-NEXT:    [[I0:%.*]] = xor i8 [[X:%.*]], 42
-; CHECK-NEXT:    [[R:%.*]] = xor i8 [[I0]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[TMP1]], 42
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %i0 = xor i8 %x, 42
@@ -19,8 +19,8 @@ define i8 @t0_scalar(i8 %x, i8 %y) {
 
 define <2 x i8> @t1_splatvec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @t1_splatvec(
-; CHECK-NEXT:    [[I0:%.*]] = xor <2 x i8> [[X:%.*]], <i8 42, i8 42>
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[I0]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[TMP1]], <i8 42, i8 42>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %i0 = xor <2 x i8> %x, <i8 42, i8 42>
@@ -29,8 +29,8 @@ define <2 x i8> @t1_splatvec(<2 x i8> %x, <2 x i8> %y) {
 }
 define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @t2_vec(
-; CHECK-NEXT:    [[I0:%.*]] = xor <2 x i8> [[X:%.*]], <i8 42, i8 24>
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[I0]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[TMP1]], <i8 42, i8 24>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %i0 = xor <2 x i8> %x, <i8 42, i8 24>
@@ -39,8 +39,8 @@ define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) {
 }
 define <2 x i8> @t3_vec_undef(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @t3_vec_undef(
-; CHECK-NEXT:    [[I0:%.*]] = xor <2 x i8> [[X:%.*]], <i8 42, i8 undef>
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[I0]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[TMP1]], <i8 42, i8 undef>
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %i0 = xor <2 x i8> %x, <i8 42, i8 undef>
@@ -63,9 +63,9 @@ define i8 @t4_extrause(i8 %x, i8 %y) {
 
 define i8 @t5_commutativity(i8 %x) {
 ; CHECK-LABEL: @t5_commutativity(
-; CHECK-NEXT:    [[I0:%.*]] = xor i8 [[X:%.*]], 42
 ; CHECK-NEXT:    [[Y:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[R:%.*]] = xor i8 [[Y]], [[I0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[TMP1]], 42
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %i0 = xor i8 %x, 42

diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
index b7cf96d1f22a..e1bda5903f6f 100644
--- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
+++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll
@@ -52,9 +52,8 @@ define i4 @in_constant_varx_6_invmask(i4 %x, i4 %mask) {
 
 define i4 @in_constant_mone_vary_invmask(i4 %y, i4 %mask) {
 ; CHECK-LABEL: @in_constant_mone_vary_invmask(
-; CHECK-NEXT:    [[N1_DEMORGAN:%.*]] = or i4 [[Y:%.*]], [[MASK:%.*]]
-; CHECK-NEXT:    [[N1:%.*]] = xor i4 [[N1_DEMORGAN]], -1
-; CHECK-NEXT:    [[R:%.*]] = xor i4 [[N1]], [[Y]]
+; CHECK-NEXT:    [[MASK_NOT:%.*]] = xor i4 [[MASK:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = or i4 [[MASK_NOT]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i4 [[R]]
 ;
   %notmask = xor i4 %mask, -1

diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
index a2e427b0c464..44e12fb64b95 100644
--- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
+++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll
@@ -94,9 +94,8 @@ define <3 x i4> @in_constant_varx_6_invmask_undef(<3 x i4> %x, <3 x i4> %mask) {
 
 define <2 x i4> @in_constant_mone_vary_invmask(<2 x i4> %y, <2 x i4> %mask) {
 ; CHECK-LABEL: @in_constant_mone_vary_invmask(
-; CHECK-NEXT:    [[N1_DEMORGAN:%.*]] = or <2 x i4> [[Y:%.*]], [[MASK:%.*]]
-; CHECK-NEXT:    [[N1:%.*]] = xor <2 x i4> [[N1_DEMORGAN]], <i4 -1, i4 -1>
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i4> [[N1]], [[Y]]
+; CHECK-NEXT:    [[MASK_NOT:%.*]] = xor <2 x i4> [[MASK:%.*]], <i4 -1, i4 -1>
+; CHECK-NEXT:    [[R:%.*]] = or <2 x i4> [[MASK_NOT]], [[Y:%.*]]
 ; CHECK-NEXT:    ret <2 x i4> [[R]]
 ;
   %notmask = xor <2 x i4> %mask, <i4 -1, i4 -1>

diff --git a/llvm/test/Transforms/InstCombine/or-xor.ll b/llvm/test/Transforms/InstCombine/or-xor.ll
index 56c5fca2e76b..2ec54c8d53e5 100644
--- a/llvm/test/Transforms/InstCombine/or-xor.ll
+++ b/llvm/test/Transforms/InstCombine/or-xor.ll
@@ -93,9 +93,15 @@ define i32 @test9(i32 %x, i32 %y) {
   ret i32 %z
 }
 
+; (A ^ B) | (~A ^ B) --> -1
+
 define i32 @test10(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test10(
-; CHECK-NEXT:    ret i32 -1
+; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[XOR1]], [[XOR2]]
+; CHECK-NEXT:    ret i32 [[OR]]
 ;
   %xor1 = xor i32 %B, %A
   %not = xor i32 %A, -1
@@ -106,15 +112,63 @@ define i32 @test10(i32 %A, i32 %B) {
 
 define i32 @test10_commuted(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test10_commuted(
+; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[XOR1]], [[XOR2]]
+; CHECK-NEXT:    ret i32 [[OR]]
+;
+  %xor1 = xor i32 %B, %A
+  %not = xor i32 %A, -1
+  %xor2 = xor i32 %not, %B
+  %or = or i32 %xor2, %xor1
+  ret i32 %or
+}
+
+define i32 @test10_extrause(i32 %A, i32 %B, i32* %dst) {
+; CHECK-LABEL: @test10_extrause(
+; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT:    store i32 [[NOT]], i32* [[DST:%.*]], align 4
+; CHECK-NEXT:    ret i32 -1
+;
+  %xor1 = xor i32 %B, %A
+  %not = xor i32 %A, -1
+  store i32 %not, i32* %dst
+  %xor2 = xor i32 %not, %B
+  %or = or i32 %xor1, %xor2
+  ret i32 %or
+}
+
+define i32 @test10_commuted_extrause(i32 %A, i32 %B, i32* %dst) {
+; CHECK-LABEL: @test10_commuted_extrause(
+; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT:    store i32 [[NOT]], i32* [[DST:%.*]], align 4
 ; CHECK-NEXT:    ret i32 -1
 ;
   %xor1 = xor i32 %B, %A
   %not = xor i32 %A, -1
+  store i32 %not, i32* %dst
   %xor2 = xor i32 %not, %B
   %or = or i32 %xor2, %xor1
   ret i32 %or
 }
 
+; (A ^ B) | ~(A ^ B) --> -1
+define i32 @test10_canonical(i32 %A, i32 %B) {
+; CHECK-LABEL: @test10_canonical(
+; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[XOR2]], -1
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[XOR1]], [[NOT]]
+; CHECK-NEXT:    ret i32 [[OR]]
+;
+  %xor1 = xor i32 %B, %A
+  %xor2 = xor i32 %A, %B
+  %not = xor i32 %xor2, -1
+  %or = or i32 %xor1, %not
+  ret i32 %or
+}
+
 ; (x | y) & ((~x) ^ y) -> (x & y)
 define i32 @test11(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test11(
@@ -304,9 +358,9 @@ define i8 @or_xor_or(i8 %x) {
 define i8 @test17(i8 %A, i8 %B) {
 ; CHECK-LABEL: @test17(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i8 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[A]], 33
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[NOT]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i8 [[XOR1]], 33
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[TMP1]], 33
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    [[RES:%.*]] = mul i8 [[OR]], [[XOR2]]
 ; CHECK-NEXT:    ret i8 [[RES]]
 ;
@@ -321,9 +375,9 @@ define i8 @test17(i8 %A, i8 %B) {
 define i8 @test18(i8 %A, i8 %B) {
 ; CHECK-LABEL: @test18(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i8 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[A]], 33
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[NOT]], [[B]]
-; CHECK-NEXT:    [[OR:%.*]] = or i8 [[XOR1]], 33
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[TMP1]], 33
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[XOR2]], [[XOR1]]
 ; CHECK-NEXT:    [[RES:%.*]] = mul i8 [[OR]], [[XOR2]]
 ; CHECK-NEXT:    ret i8 [[RES]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll
index b3bc74d3cfe0..b78b497135e8 100644
--- a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll
+++ b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-scalar.ll
@@ -60,10 +60,8 @@ define i4 @in_constant_varx_14(i4 %x, i4 %mask) {
 
 define i4 @in_constant_mone_vary(i4 %y, i4 %mask) {
 ; CHECK-LABEL: @in_constant_mone_vary(
-; CHECK-NEXT:    [[N0:%.*]] = and i4 [[Y:%.*]], 1
-; CHECK-NEXT:    [[N1:%.*]] = xor i4 [[N0]], 1
-; CHECK-NEXT:    [[R:%.*]] = xor i4 [[N1]], [[Y]]
-; CHECK-NEXT:    ret i4 [[R]]
+; CHECK-NEXT:    [[R1:%.*]] = or i4 [[Y:%.*]], 1
+; CHECK-NEXT:    ret i4 [[R1]]
 ;
   %n0 = xor i4 %y, -1 ; %x
   %n1 = and i4 %n0, 1

diff --git a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll
index 4f92514afc87..ef27891c1a80 100644
--- a/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll
+++ b/llvm/test/Transforms/InstCombine/unfold-masked-merge-with-const-mask-vector.ll
@@ -97,10 +97,8 @@ define <3 x i4> @in_constant_varx_14_undef(<3 x i4> %x, <3 x i4> %mask) {
 
 define <2 x i4> @in_constant_mone_vary(<2 x i4> %y, <2 x i4> %mask) {
 ; CHECK-LABEL: @in_constant_mone_vary(
-; CHECK-NEXT:    [[N0:%.*]] = and <2 x i4> [[Y:%.*]], <i4 1, i4 1>
-; CHECK-NEXT:    [[N1:%.*]] = xor <2 x i4> [[N0]], <i4 1, i4 1>
-; CHECK-NEXT:    [[R:%.*]] = xor <2 x i4> [[N1]], [[Y]]
-; CHECK-NEXT:    ret <2 x i4> [[R]]
+; CHECK-NEXT:    [[R1:%.*]] = or <2 x i4> [[Y:%.*]], <i4 1, i4 1>
+; CHECK-NEXT:    ret <2 x i4> [[R1]]
 ;
   %n0 = xor <2 x i4> %y, <i4 -1, i4 -1> ; %x
   %n1 = and <2 x i4> %n0, <i4 1, i4 1>

diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
index efdb40a80aea..2548edb56ef5 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
@@ -1749,8 +1749,8 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) {
 define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) {
 ; CHECK-LABEL: @splat_assoc_xor(
 ; CHECK-NEXT:    [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <5 x i32> zeroinitializer
-; CHECK-NEXT:    [[A:%.*]] = xor <5 x i32> [[Y:%.*]], <i32 42, i32 42, i32 42, i32 42, i32 42>
-; CHECK-NEXT:    [[R:%.*]] = xor <5 x i32> [[SPLATX]], [[A]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <5 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42>
 ; CHECK-NEXT:    ret <5 x i32> [[R]]
 ;
   %splatx = shufflevector <4 x i32> %x, <4 x i32> undef, <5 x i32> zeroinitializer

diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
index a033431c4bcc..e19e86a25524 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
@@ -1749,8 +1749,8 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) {
 define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) {
 ; CHECK-LABEL: @splat_assoc_xor(
 ; CHECK-NEXT:    [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> undef, <5 x i32> zeroinitializer
-; CHECK-NEXT:    [[A:%.*]] = xor <5 x i32> [[Y:%.*]], <i32 42, i32 42, i32 42, i32 42, i32 42>
-; CHECK-NEXT:    [[R:%.*]] = xor <5 x i32> [[SPLATX]], [[A]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = xor <5 x i32> [[TMP1]], <i32 42, i32 42, i32 42, i32 42, i32 42>
 ; CHECK-NEXT:    ret <5 x i32> [[R]]
 ;
   %splatx = shufflevector <4 x i32> %x, <4 x i32> undef, <5 x i32> zeroinitializer

diff --git a/llvm/test/Transforms/InstCombine/xor2.ll b/llvm/test/Transforms/InstCombine/xor2.ll
index fe969769a020..a8056803dd0e 100644
--- a/llvm/test/Transforms/InstCombine/xor2.ll
+++ b/llvm/test/Transforms/InstCombine/xor2.ll
@@ -171,7 +171,11 @@ define i32 @test10b(i32 %b, i32 %c) {
 
 define i32 @test11(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11(
-; CHECK-NEXT:    ret i32 0
+; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %xor1 = xor i32 %B, %A
   %not = xor i32 %A, -1
@@ -182,7 +186,11 @@ define i32 @test11(i32 %A, i32 %B) {
 
 define i32 @test11b(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11b(
-; CHECK-NEXT:    ret i32 0
+; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
+; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %xor1 = xor i32 %B, %A
   %not = xor i32 %A, -1
@@ -194,8 +202,8 @@ define i32 @test11b(i32 %A, i32 %B) {
 define i32 @test11c(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11c(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[NOT]], [[B]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -209,9 +217,9 @@ define i32 @test11c(i32 %A, i32 %B) {
 define i32 @test11d(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11d(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[NOT]], [[B]]
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR2]], [[XOR1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %xor1 = xor i32 %A, %B
@@ -225,8 +233,8 @@ define i32 @test11e(i32 %A, i32 %B, i32 %C) {
 ; CHECK-LABEL: @test11e(
 ; CHECK-NEXT:    [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]]
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[FORCE]], [[NOT]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
@@ -242,9 +250,9 @@ define i32 @test11f(i32 %A, i32 %B, i32 %C) {
 ; CHECK-LABEL: @test11f(
 ; CHECK-NEXT:    [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]]
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i32 [[A]], -1
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[FORCE]], [[NOT]]
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR2]], [[XOR1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i32 [[TMP1]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    ret i32 [[AND]]
 ;
   %force = mul i32 %B, %C
@@ -481,9 +489,9 @@ define i32 @xor_or_xor_common_op_extra_use3(i32 %a, i32 %b, i32 %c, i32* %p1, i3
 define i8 @test15(i8 %A, i8 %B) {
 ; CHECK-LABEL: @test15(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i8 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[A]], 33
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[NOT]], [[B]]
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[XOR1]], -34
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[TMP1]], 33
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[XOR1]], [[XOR2]]
 ; CHECK-NEXT:    [[RES:%.*]] = mul i8 [[AND]], [[XOR2]]
 ; CHECK-NEXT:    ret i8 [[RES]]
 ;
@@ -498,9 +506,9 @@ define i8 @test15(i8 %A, i8 %B) {
 define i8 @test16(i8 %A, i8 %B) {
 ; CHECK-LABEL: @test16(
 ; CHECK-NEXT:    [[XOR1:%.*]] = xor i8 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT:    [[NOT:%.*]] = xor i8 [[A]], 33
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[NOT]], [[B]]
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[XOR1]], -34
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[A]], [[B]]
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[TMP1]], 33
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[XOR2]], [[XOR1]]
 ; CHECK-NEXT:    [[RES:%.*]] = mul i8 [[AND]], [[XOR2]]
 ; CHECK-NEXT:    ret i8 [[RES]]
 ;


        


More information about the llvm-branch-commits mailing list