[clang-tools-extra] [InstCombine] Fold xored one-complemented operand comparisons (PR #69882)

via cfe-commits cfe-commits at lists.llvm.org
Wed Oct 25 03:36:05 PDT 2023


https://github.com/elhewaty updated https://github.com/llvm/llvm-project/pull/69882

>From 3a6ddf5192264e2dc32adc67184e06529151b343 Mon Sep 17 00:00:00 2001
From: Mohamed Atef <mohamedatef1698 at gmail.com>
Date: Tue, 24 Oct 2023 16:54:14 +0300
Subject: [PATCH 1/2] [InstCombine] Add test coverage for comparisons of
 operands including one-complemented operands (NFC)

---
 .../Transforms/InstCombine/icmp-of-xor-x.ll   | 295 ++++++++++++++++++
 1 file changed, 295 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index 9b6572697cf5e8f..94856424e102b03 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -3,6 +3,301 @@
 
 declare void @llvm.assume(i1)
 declare void @barrier()
+declare void @use.i8(i8)
+
+; test for (~x ^ y) < ~z
+define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %nz = xor i8 %z, -1
+  %r = icmp slt i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test for ~z <= (x ^ ~y)
+define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor2(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %x
+  %r = icmp sle i8 %nz, %xor2
+  ret i1 %r
+}
+
+; test for ~z > (~x ^ y)
+define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor3(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sgt i8 %nz, %xor2
+  ret i1 %r
+}
+
+; tests for equality
+define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp ne i8 %nz, %xor2
+  ret i1 %r
+}
+
+define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp eq i8 %nz, %xor2
+  ret i1 %r
+}
+
+; other tests
+define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor4(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sge i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor5(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ult i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor6(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ule i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor7(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ugt i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor8(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp uge i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test (~a ^ b) < ~a
+define i1 @test_slt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_slt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp slt i32 %4, %3
+  ret i1 %5
+}
+
+; test (a ^ ~b) <= ~b
+define i1 @test_sle_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sle_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp sle i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %1, -1
+  %4 = xor i32 %3, %0
+  %5 = icmp sle i32 %4, %3
+  ret i1 %5
+}
+
+; test ~a > (~a ^ b)
+define i1 @test_sgt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sgt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sgt i32 %3, %4
+  ret i1 %5
+}
+
+define i1 @test_sge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp sge i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sge i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ult_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ult_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ult i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ule_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ule_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ule i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ugt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ugt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ugt i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_uge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_uge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp uge i32 %4, %3
+  ret i1 %5
+}
+
+; Negative tests
+define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1_nofold_multi_use(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %nz = xor i8 %z, -1
+  call void @use.i8(i8 %nz)
+  %r = icmp slt i8 %xor2, %nz
+  ret i1 %r
+}
 
 define i1 @xor_uge(i8 %x, i8 %y) {
 ; CHECK-LABEL: @xor_uge(

>From 718675887d3a41428f7026c031999c21638f1a14 Mon Sep 17 00:00:00 2001
From: Mohamed Atef <mohamedatef1698 at gmail.com>
Date: Wed, 25 Oct 2023 13:29:31 +0300
Subject: [PATCH 2/2] [InstCombine] Fold xored one-complemented operand

---
 .../InstCombine/InstCombineCompares.cpp       |  51 +++++----
 .../Transforms/InstCombine/icmp-of-xor-x.ll   | 100 ++++++++----------
 2 files changed, 75 insertions(+), 76 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 66e2b6c72cce46c..346083b52e19904 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7117,38 +7117,54 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
     return Res;
 
   {
-    Value *A, *B;
-    // Transform (A & ~B) == 0 --> (A & B) != 0
-    // and       (A & ~B) != 0 --> (A & B) == 0
-    // if A is a power of 2.
-    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
-        match(Op1, m_Zero()) &&
-        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
-      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
+    Value *X, *Y, *Z;
+    // Transform (X & ~Y) == 0 --> (X & Y) != 0
+    // and       (X & ~Y) != 0 --> (X & Y) == 0
+    // if X is a power of 2.
+    if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
+        match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, 0, &I) &&
+        I.isEquality())
+      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
                           Op1);
 
+    // Transform (~X ^ Y) s< ~Z  --> (X ^ Y) s> Z,
+    //           (~X ^ Y) s> ~Z  --> (X ^ Y) s< Z,
+    //           (~X ^ Y) s<= ~Z --> (X ^ Y) s>= Z,
+    //           (~X ^ Y) s>= ~Z --> (X ^ Y) s<= Z,
+    //           (~X ^ Y) u< ~Z  --> (X ^ Y) u> Z,
+    //           (~X ^ Y) u> ~Z  --> (X ^ Y) u< Z,
+    //           (~X ^ Y) u<= ~Z --> (X ^ Y) u>= Z,
+    //           (~X ^ Y) u>= ~Z --> (X ^ Y) u<= Z,
+    //           (~X ^ Y) == ~Z  --> (X ^ Y) == Z,
+    // and       (~X ^ Y) != ~Z  --> (X ^ Y) != Z.
+    if (match(&I, m_c_ICmp(Pred, m_c_Xor(m_Not(m_Value(X)), m_Value(Y)),
+                           m_Not(m_Value(Z)))) &&
+        (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse()))
+      return new ICmpInst(I.getSwappedPredicate(Pred), Builder.CreateXor(X, Y),
+                          Z);
+
     // ~X < ~Y --> Y < X
     // ~X < C -->  X > ~C
-    if (match(Op0, m_Not(m_Value(A)))) {
-      if (match(Op1, m_Not(m_Value(B))))
-        return new ICmpInst(I.getPredicate(), B, A);
+    if (match(Op0, m_Not(m_Value(X)))) {
+      if (match(Op1, m_Not(m_Value(Y))))
+        return new ICmpInst(I.getPredicate(), Y, X);
 
       const APInt *C;
       if (match(Op1, m_APInt(C)))
-        return new ICmpInst(I.getSwappedPredicate(), A,
+        return new ICmpInst(I.getSwappedPredicate(), X,
                             ConstantInt::get(Op1->getType(), ~(*C)));
     }
 
     Instruction *AddI = nullptr;
-    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
+    if (match(&I, m_UAddWithOverflow(m_Value(X), m_Value(Y),
                                      m_Instruction(AddI))) &&
-        isa<IntegerType>(A->getType())) {
+        isa<IntegerType>(X->getType())) {
       Value *Result;
       Constant *Overflow;
       // m_UAddWithOverflow can match patterns that do not include  an explicit
       // "add" instruction, so check the opcode of the matched op.
       if (AddI->getOpcode() == Instruction::Add &&
-          OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI,
+          OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, X, Y, *AddI,
                                 Result, Overflow)) {
         replaceInstUsesWith(*AddI, Result);
         eraseInstFromFunction(*AddI);
@@ -7157,16 +7173,15 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
     }
 
     // (zext a) * (zext b)  --> llvm.umul.with.overflow.
-    if (match(Op0, m_NUWMul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
+    if (match(Op0, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y))))) {
       if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
         return R;
     }
-    if (match(Op1, m_NUWMul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
+    if (match(Op1, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y))))) {
       if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
         return R;
     }
 
-    Value *X, *Y;
     // Signbit test folds
     // Fold (X u>> BitWidth - 1 Pred ZExt(i1))  -->  X s< 0 Pred i1
     // Fold (X s>> BitWidth - 1 Pred SExt(i1))  -->  X s< 0 Pred i1
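The comment block added above lists the rewrites as a table of predicate swaps. They are all instances of one fact: ~X ^ Y equals ~(X ^ Y), and bitwise-not reverses both the unsigned and the signed order, so comparing two complemented values is the same as comparing the uncomplemented ones under the swapped predicate (eq/ne map to themselves). A standalone brute-force check over i8 -- plain C++, no LLVM APIs, not part of the patch -- that confirms this:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Brute-force over all i8 triples: (~x ^ y) pred ~z must agree with
// (x ^ y) swapped(pred) z for the unsigned, signed, and equality predicates.
int main() {
  for (int xi = 0; xi < 256; ++xi)
    for (int yi = 0; yi < 256; ++yi)
      for (int zi = 0; zi < 256; ++zi) {
        uint8_t x = uint8_t(xi), y = uint8_t(yi), z = uint8_t(zi);
        uint8_t L = uint8_t(~x ^ y), R = uint8_t(~z); // original operands
        uint8_t l = uint8_t(x ^ y), r = z;            // rewritten operands
        // unsigned: u< <-> u>, u<= <-> u>=
        assert((L < R) == (l > r) && (L <= R) == (l >= r));
        // signed: same swap, since ~a = -a - 1 reverses signed order too
        int8_t SL = int8_t(L), SR = int8_t(R), sl = int8_t(l), sr = int8_t(r);
        assert((SL < SR) == (sl > sr) && (SL <= SR) == (sl >= sr));
        // equality is preserved as-is (the swapped form of eq/ne is eq/ne)
        assert((L == R) == (l == r));
      }
  puts("all i8 cases verified");
  return 0;
}

This walks every (x, y, z) combination; the signed case holds because ~a equals -a - 1, which is strictly decreasing across the whole i8 range, just like 255 - a is for the unsigned case.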
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index 94856424e102b03..d673a5f8ccc8957 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -10,9 +10,8 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor1(
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xor = xor i8 %x, -1
@@ -26,11 +25,10 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
 ; test for ~z <= (x ^ ~y)
 define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor2(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -44,11 +42,10 @@ define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
 ; test for ~z > (~x ^ y)
 define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor3(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -89,11 +86,10 @@ define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
 ; other tests
 define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor4(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -106,11 +102,10 @@ define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor5(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -123,11 +118,10 @@ define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor6(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -140,11 +134,10 @@ define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor7(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -157,11 +150,10 @@ define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor8(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -175,10 +167,9 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
 ; test (~a ^ b) < ~a
 define i1 @test_slt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_slt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -189,10 +180,9 @@ define i1 @test_slt_xor(i32 %0, i32 %1) {
 ; test (a ^ ~b) <= ~b
 define i1 @test_sle_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sle_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP0:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp sle i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sge i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %1, -1
   %4 = xor i32 %3, %0
@@ -203,10 +193,9 @@ define i1 @test_sle_xor(i32 %0, i32 %1) {
 ; test ~a > (~a ^ b)
 define i1 @test_sgt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sgt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -216,10 +205,9 @@ define i1 @test_sgt_xor(i32 %0, i32 %1) {
 
 define i1 @test_sge_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sge_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp sge i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -229,10 +217,9 @@ define i1 @test_sge_xor(i32 %0, i32 %1) {
 
 define i1 @test_ult_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ult_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -242,10 +229,9 @@ define i1 @test_ult_xor(i32 %0, i32 %1) {
 
 define i1 @test_ule_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ule_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp uge i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -255,10 +241,9 @@ define i1 @test_ule_xor(i32 %0, i32 %1) {
 
 define i1 @test_ugt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ugt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -268,10 +253,9 @@ define i1 @test_ugt_xor(i32 %0, i32 %1) {
 
 define i1 @test_uge_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_uge_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -284,10 +268,10 @@ define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor1_nofold_multi_use(
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
 ; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xor = xor i8 %x, -1


