[llvm] daddf40 - [InstCombine] Fold xored one-complemented operand comparisons (#69882)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 14 05:54:08 PST 2023
Author: elhewaty
Date: 2023-11-14T21:54:03+08:00
New Revision: daddf402d9a4e3bacc37098d990e56dc9957ca3e
URL: https://github.com/llvm/llvm-project/commit/daddf402d9a4e3bacc37098d990e56dc9957ca3e
DIFF: https://github.com/llvm/llvm-project/commit/daddf402d9a4e3bacc37098d990e56dc9957ca3e.diff
LOG: [InstCombine] Fold xored one-complemented operand comparisons (#69882)
- [InstCombine] Add test coverage for comparisons of operands including
one-complemented operands (NFC).
- [InstCombine] Fold xored one-complemented operand comparisons.
Alive2: https://alive2.llvm.org/ce/z/PZMJeB
Fixes #69803.
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 03bc0f16c8938ae..9bc84c7dd6e1539 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -7009,7 +7009,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
return Res;
{
- Value *X, *Y;
+ Value *X, *Y, *Z;
// Transform (X & ~Y) == 0 --> (X & Y) != 0
// and (X & ~Y) != 0 --> (X & Y) == 0
// if X is a power of 2.
@@ -7019,6 +7019,22 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
Op1);
+ // Transform (~X ^ Y) s< ~Z --> (X ^ Y) s> Z,
+ // (~X ^ Y) s> ~Z --> (X ^ Y) s< Z,
+ // (~X ^ Y) s<= ~Z --> (X ^ Y) s>= Z,
+ // (~X ^ Y) s>= ~Z --> (X ^ Y) s<= Z,
+ // (~X ^ Y) u< ~Z --> (X ^ Y) u> Z,
+ // (~X ^ Y) u> ~Z --> (X ^ Y) u< Z,
+ // (~X ^ Y) u<= ~Z --> (X ^ Y) u>= Z,
+ // (~X ^ Y) u>= ~Z --> (X ^ Y) u<= Z,
+ // (~X ^ Y) == ~Z --> (X ^ Y) == Z,
+ // and (~X ^ Y) != ~Z --> (X ^ Y) != Z,
+ if (match(&I, m_c_ICmp(Pred, m_c_Xor(m_Not(m_Value(X)), m_Value(Y)),
+ m_Not(m_Value(Z)))) &&
+ (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse()))
+ return new ICmpInst(I.getSwappedPredicate(Pred), Builder.CreateXor(X, Y),
+ Z);
+
// ~X < ~Y --> Y < X
// ~X < C --> X > ~C
if (match(Op0, m_Not(m_Value(X)))) {
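Not part of the commit, but it may help when sanity-checking the predicate
table above: ~X ^ Y == ~(X ^ Y), and bitwise NOT is the order-reversing map
a -> -1 - a on both the unsigned and the signed number line, so comparing two
complemented values is the same as comparing the originals under the swapped
predicate (equality predicates are unaffected). Below is a minimal standalone
C++ sketch that brute-forces the i8 case for one unsigned and one signed
predicate, in the spirit of the Alive2 proof linked above.

#include <cassert>
#include <cstdint>

int main() {
  // Exhaustively check (~x ^ y) pred ~z  <==>  (x ^ y) pred' z over i8,
  // where pred' is the swapped predicate (u< <-> u>, s< <-> s>).
  for (int xi = 0; xi < 256; ++xi)
    for (int yi = 0; yi < 256; ++yi)
      for (int zi = 0; zi < 256; ++zi) {
        uint8_t x = xi, y = yi, z = zi;
        // Unsigned: (~x ^ y) u< ~z  <==>  (x ^ y) u> z
        assert((uint8_t(~x ^ y) < uint8_t(~z)) == (uint8_t(x ^ y) > z));
        // Signed: (~x ^ y) s< ~z  <==>  (x ^ y) s> z
        int8_t sx = int8_t(x), sy = int8_t(y), sz = int8_t(z);
        assert((int8_t(~sx ^ sy) < int8_t(~sz)) == (int8_t(sx ^ sy) > sz));
      }
  return 0;
}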
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index 9b6572697cf5e8f..ef4f2bfecfd8ed9 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -3,6 +3,285 @@
declare void @llvm.assume(i1)
declare void @barrier()
+declare void @use.i8(i8)
+
+; test for (~x ^ y) s< ~z
+define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %nz = xor i8 %z, -1
+ %r = icmp slt i8 %xor2, %nz
+ ret i1 %r
+}
+
+; test for ~z s<= (x ^ ~y)
+define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor2(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %y, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %x
+ %r = icmp sle i8 %nz, %xor2
+ ret i1 %r
+}
+
+; test for ~z s> (~x ^ y)
+define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor3(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp sgt i8 %nz, %xor2
+ ret i1 %r
+}
+
+; tests for equality
+define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_ne(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %y, -1
+ %xor2 = xor i8 %xor, %x
+ %r = icmp ne i8 %nz, %xor2
+ ret i1 %r
+}
+
+define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_eq(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %y, -1
+ %xor2 = xor i8 %xor, %x
+ %r = icmp eq i8 %nz, %xor2
+ ret i1 %r
+}
+
+; tests for the remaining predicates (sge and the unsigned comparisons)
+define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor4(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp sge i8 %xor2, %nz
+ ret i1 %r
+}
+
+define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor5(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp ult i8 %xor2, %nz
+ ret i1 %r
+}
+
+define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor6(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp ule i8 %xor2, %nz
+ ret i1 %r
+}
+
+define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor7(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp ugt i8 %xor2, %nz
+ ret i1 %r
+}
+
+define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor8(
+; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %nz = xor i8 %z, -1
+ %xor = xor i8 %x, -1
+ call void @use.i8(i8 %xor)
+ %xor2 = xor i8 %xor, %y
+ %r = icmp uge i8 %xor2, %nz
+ ret i1 %r
+}
+
+; test (~x ^ y) s< ~x
+define i1 @test_slt_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_slt_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %r = icmp slt i32 %xor2, %xor1
+ ret i1 %r
+}
+
+; test (x ^ ~y) s<= ~y
+define i1 @test_sle_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_sle_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sge i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %xor1 = xor i32 %y, -1
+ %xor2 = xor i32 %xor1, %x
+ %r = icmp sle i32 %xor2, %xor1
+ ret i1 %r
+}
+
+; test (~x ^ y) s> ~x
+define i1 @test_sgt_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_sgt_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp sgt i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+define i1 @test_sge_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_sge_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp sge i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+define i1 @test_ult_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_ult_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp ult i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+define i1 @test_ule_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_ule_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp ule i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+define i1 @test_ugt_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_ugt_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp ugt i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+define i1 @test_uge_xor(i32 %x, i32 %y) {
+; CHECK-LABEL: @test_uge_xor(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %xor1 = xor i32 %x, -1
+ %xor2 = xor i32 %xor1, %y
+ %cmp = icmp uge i32 %xor2, %xor1
+ ret i1 %cmp
+}
+
+; Negative test: both complemented values have extra uses
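+; The new fold requires hasOneUse() on at least one compare operand. Here both
+; %xor2 and %nz have extra uses, so rewriting the compare could not let either
+; complemented value die and the fold stays off; the icmp below is still
+; simplified, presumably by the pre-existing ~X < ~Y --> Y < X fold, which
+; reuses existing values instead of creating a new xor.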
+define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1_nofold_multi_use(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[XOR2:%.*]] = xor i8 [[TMP1]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[XOR2]])
+; CHECK-NEXT: [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT: call void @use.i8(i8 [[NZ]])
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %xor = xor i8 %x, -1
+ %xor2 = xor i8 %xor, %y
+ call void @use.i8(i8 %xor2)
+ %nz = xor i8 %z, -1
+ call void @use.i8(i8 %nz)
+ %r = icmp slt i8 %xor2, %nz
+ ret i1 %r
+}
define i1 @xor_uge(i8 %x, i8 %y) {
; CHECK-LABEL: @xor_uge(