[llvm] [InstCombine] Fold xored one-complemented operand comparisons (PR #69882)

via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 25 08:56:56 PDT 2023


https://github.com/elhewaty updated https://github.com/llvm/llvm-project/pull/69882

From 2b8f2622c1125018003ebcbec9015fa326817dcd Mon Sep 17 00:00:00 2001
From: Mohamed Atef <mohamedatef1698 at gmail.com>
Date: Tue, 24 Oct 2023 16:54:14 +0300
Subject: [PATCH 1/2] [InstCombine] Add test coverage for comparisons of
 operands including one-complemented operands (NFC)

---
 .../Transforms/InstCombine/icmp-of-xor-x.ll   | 295 ++++++++++++++++++
 1 file changed, 295 insertions(+)

diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index 9b6572697cf5e8f..94856424e102b03 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -3,6 +3,301 @@
 
 declare void @llvm.assume(i1)
 declare void @barrier()
+declare void @use.i8(i8)
+
+; test for (~x ^ y) < ~z
+define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %nz = xor i8 %z, -1
+  %r = icmp slt i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test for ~z <= (x ^ ~y)
+define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor2(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %x
+  %r = icmp sle i8 %nz, %xor2
+  ret i1 %r
+}
+
+; test for ~z > (~x ^ y)
+define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor3(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sgt i8 %nz, %xor2
+  ret i1 %r
+}
+
+; tests for equality
+define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp ne i8 %nz, %xor2
+  ret i1 %r
+}
+
+define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp eq i8 %nz, %xor2
+  ret i1 %r
+}
+
+; other tests
+define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor4(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sge i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor5(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ult i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor6(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ule i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor7(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ugt i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor8(
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp uge i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test (~a ^ b) < ~a
+define i1 @test_slt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_slt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp slt i32 %4, %3
+  ret i1 %5
+}
+
+; test (a ^ ~b) <= ~b
+define i1 @test_sle_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sle_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp sle i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %1, -1
+  %4 = xor i32 %3, %0
+  %5 = icmp sle i32 %4, %3
+  ret i1 %5
+}
+
+; test ~a > (~a ^ b)
+define i1 @test_sgt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sgt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sgt i32 %3, %4
+  ret i1 %5
+}
+
+define i1 @test_sge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp sge i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sge i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ult_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ult_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ult i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ule_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ule_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ule i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ugt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ugt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ugt i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_uge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_uge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    ret i1 [[TMP5]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp uge i32 %4, %3
+  ret i1 %5
+}
+
+; Negative tests
+define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1_nofold_multi_use(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %nz = xor i8 %z, -1
+  call void @use.i8(i8 %nz)
+  %r = icmp slt i8 %xor2, %nz
+  ret i1 %r
+}
 
 define i1 @xor_uge(i8 %x, i8 %y) {
 ; CHECK-LABEL: @xor_uge(

From 61c05b74be322c5b704850d6deadf55a875685e6 Mon Sep 17 00:00:00 2001
From: Mohamed Atef <mohamedatef1698 at gmail.com>
Date: Wed, 25 Oct 2023 13:29:31 +0300
Subject: [PATCH 2/2] [InstCombine] Fold xored one-complemented operand

---
 .../InstCombine/InstCombineCompares.cpp       |  18 +++-
 .../Transforms/InstCombine/icmp-of-xor-x.ll   | 100 ++++++++----------
 2 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index a09c9b48be9d5b2..a9cd545e4098122 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7022,7 +7022,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
     return Res;
 
   {
-    Value *X, *Y;
+    Value *X, *Y, *Z;
     // Transform (X & ~Y) == 0 --> (X & Y) != 0
     // and       (X & ~Y) != 0 --> (X & Y) == 0
     // if A is a power of 2.
@@ -7032,6 +7032,22 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
                           Op1);
 
+    // Transform (~X ^ Y) s< ~Z  --> (X ^ Y) s> Z,
+    //           (~X ^ Y) s> ~Z  --> (X ^ Y) s< Z,
+    //           (~X ^ Y) s<= ~Z --> (X ^ Y) s>= Z,
+    //           (~X ^ Y) s>= ~Z --> (X ^ Y) s<= Z,
+    //           (~X ^ Y) u< ~Z  --> (X ^ Y) u> Z,
+    //           (~X ^ Y) u> ~Z  --> (X ^ Y) u< Z,
+    //           (~X ^ Y) u<= ~Z --> (X ^ Y) u>= Z,
+    //           (~X ^ Y) u>= ~Z --> (X ^ Y) u<= Z,
+    //           (~X ^ Y) == ~Z  --> (X ^ Y) == Z,
+    // and       (~X ^ Y) != ~Z  --> (X ^ Y) != Z,
+    if (match(&I, m_c_ICmp(Pred, m_c_Xor(m_Not(m_Value(X)), m_Value(Y)),
+                           m_Not(m_Value(Z)))) &&
+        (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse()))
+      return new ICmpInst(I.getSwappedPredicate(Pred), Builder.CreateXor(X, Y),
+                          Z);
+
     // ~X < ~Y --> Y < X
     // ~X < C -->  X > ~C
     if (match(Op0, m_Not(m_Value(X)))) {
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index 94856424e102b03..d673a5f8ccc8957 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -10,9 +10,8 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor1(
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xor = xor i8 %x, -1
@@ -26,11 +25,10 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
 ; test for ~z <= (x ^ ~y)
 define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor2(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[X:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -44,11 +42,10 @@ define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
 ; test for ~z > (~x ^ y)
 define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor3(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -89,11 +86,10 @@ define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
 ; other tests
 define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor4(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -106,11 +102,10 @@ define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor5(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -123,11 +118,10 @@ define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor6(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -140,11 +134,10 @@ define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor7(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -157,11 +150,10 @@ define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
 
 define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor8(
-; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %nz = xor i8 %z, -1
@@ -175,10 +167,9 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
 ; test (~a ^ b) < ~a
 define i1 @test_slt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_slt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -189,10 +180,9 @@ define i1 @test_slt_xor(i32 %0, i32 %1) {
 ; test (a ^ ~b) <= ~b
 define i1 @test_sle_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sle_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP0:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp sle i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sge i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %1, -1
   %4 = xor i32 %3, %0
@@ -203,10 +193,9 @@ define i1 @test_sle_xor(i32 %0, i32 %1) {
 ; test ~a > (~a ^ b)
 define i1 @test_sgt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sgt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -216,10 +205,9 @@ define i1 @test_sgt_xor(i32 %0, i32 %1) {
 
 define i1 @test_sge_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_sge_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp sge i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -229,10 +217,9 @@ define i1 @test_sge_xor(i32 %0, i32 %1) {
 
 define i1 @test_ult_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ult_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -242,10 +229,9 @@ define i1 @test_ult_xor(i32 %0, i32 %1) {
 
 define i1 @test_ule_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ule_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp uge i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -255,10 +241,9 @@ define i1 @test_ule_xor(i32 %0, i32 %1) {
 
 define i1 @test_ugt_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_ugt_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -268,10 +253,9 @@ define i1 @test_ugt_xor(i32 %0, i32 %1) {
 
 define i1 @test_uge_xor(i32 %0, i32 %1) {
 ; CHECK-LABEL: @test_uge_xor(
-; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], -1
-; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[TMP5:%.*]] = icmp uge i32 [[TMP4]], [[TMP3]]
-; CHECK-NEXT:    ret i1 [[TMP5]]
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
 ;
   %3 = xor i32 %0, -1
   %4 = xor i32 %3, %1
@@ -284,10 +268,10 @@ define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test_xor1_nofold_multi_use(
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[XOR]], [[Y:%.*]]
 ; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
 ; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
-; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR2]], [[NZ]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %xor = xor i8 %x, -1
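
For reference, the equivalences listed in the comment block added in PATCH 2 follow from ~A == -A - 1 and from ~X ^ Y == ~(X ^ Y): bitwise NOT reverses both the signed and the unsigned order, so each relational predicate gets swapped while eq/ne are preserved. The standalone C++ snippet below is only an illustrative sketch (it is not part of the patch) that exhaustively checks the i8 case:

#include <cassert>
#include <cstdint>

int main() {
  // Check every i8 triple (x, y, z): 2^24 combinations.
  for (int xi = 0; xi < 256; ++xi)
    for (int yi = 0; yi < 256; ++yi)
      for (int zi = 0; zi < 256; ++zi) {
        uint8_t x = xi, y = yi, z = zi;
        uint8_t lhs = static_cast<uint8_t>(~x ^ y); // ~X ^ Y == ~(X ^ Y)
        uint8_t nz = static_cast<uint8_t>(~z);      // ~Z
        uint8_t xy = static_cast<uint8_t>(x ^ y);   // X ^ Y
        // Unsigned: (~X ^ Y) u< ~Z  <=>  (X ^ Y) u> Z.
        assert((lhs < nz) == (xy > z));
        // Signed:   (~X ^ Y) s< ~Z  <=>  (X ^ Y) s> Z.
        assert((static_cast<int8_t>(lhs) < static_cast<int8_t>(nz)) ==
               (static_cast<int8_t>(xy) > static_cast<int8_t>(z)));
        // Equality is preserved: (~X ^ Y) == ~Z  <=>  (X ^ Y) == Z.
        assert((lhs == nz) == (xy == z));
      }
  return 0;
}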


