[llvm] 7c5cc22 - [InstCombine] Add tests for transforming `(icmp (xor X, Y), X)`; NFC

Noah Goldstein via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 17 20:39:18 PDT 2023


Author: Noah Goldstein
Date: 2023-04-17T22:39:06-05:00
New Revision: 7c5cc22b55673e451b5643c3b921d1fea0a41d30

URL: https://github.com/llvm/llvm-project/commit/7c5cc22b55673e451b5643c3b921d1fea0a41d30
DIFF: https://github.com/llvm/llvm-project/commit/7c5cc22b55673e451b5643c3b921d1fea0a41d30.diff

LOG: [InstCombine] Add tests for transforming `(icmp (xor X, Y), X)`; NFC

Differential Revision: https://reviews.llvm.org/D144607
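
For context, the folds these tests stage (reviewed in D144607) hinge on what is known about Y: if Y is known non-zero, then `xor X, Y` can never equal `X`, so non-strict unsigned predicates tighten to their strict forms; and if the most significant set bit of Y is known, the direction of the comparison against X is decided by the corresponding bit of X. In particular, when the sign bit of Y is known set, a signed comparison reduces to a sign test on X. A minimal sketch of that case, with a hypothetical function name (the fold itself is not part of this NFC commit):

    ; With the sign bit of %y forced set, %xor always has the opposite
    ; sign of %x, so (icmp sgt (xor X, Y), X) is true exactly when X is
    ; negative, i.e. it can fold to (icmp slt X, 0).
    define i1 @sketch_sgt_becomes_sign_test(i8 %x, i8 %yy) {
      %y = or i8 %yy, -128        ; force the sign bit of %y
      %xor = xor i8 %x, %y        ; flips the sign bit of %x
      %r = icmp sgt i8 %xor, %x   ; true iff %x is negative
      ret i1 %r
    }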

Added: 
    llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
new file mode 100644
index 0000000000000..c6d23504f04e2
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare void @barrier()
+
+define i1 @xor_uge(i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_uge(
+; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ynz = icmp ne i8 %y, 0
+  call void @llvm.assume(i1 %ynz)
+  %xor = xor i8 %x, %y
+  %r = icmp uge i8 %xor, %x
+  ret i1 %r
+}
+
+define i1 @xor_uge_fail_maybe_zero(i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_uge_fail_maybe_zero(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, %y
+  %r = icmp uge i8 %xor, %x
+  ret i1 %r
+}
+
+define <2 x i1> @xor_ule_2(<2 x i8> %x, <2 x i8> %yy) {
+; CHECK-LABEL: @xor_ule_2(
+; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]], <i8 9, i8 8>
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %y = or <2 x i8> %yy, <i8 9, i8 8>
+  %xor = xor <2 x i8> %y, %x
+  %r = icmp ule <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define i1 @xor_sle_2(i8 %xx, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_sle_2(
+; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = add i8 %xx, %z
+  %ynz = icmp ne i8 %y, 0
+  call void @llvm.assume(i1 %ynz)
+  %xor = xor i8 %x, %y
+  %r = icmp sle i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_sge(i8 %xx, i8 %yy) {
+; CHECK-LABEL: @xor_sge(
+; CHECK-NEXT:    [[X:%.*]] = mul i8 [[XX:%.*]], [[XX]]
+; CHECK-NEXT:    [[Y:%.*]] = or i8 [[YY:%.*]], -128
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = mul i8 %xx, %xx
+  %y = or i8 %yy, 128
+  %xor = xor i8 %y, %x
+  %r = icmp sge i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_ugt_2(i8 %xx, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_ugt_2(
+; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[YZ:%.*]] = and i8 [[Y:%.*]], 63
+; CHECK-NEXT:    [[Y1:%.*]] = or i8 [[YZ]], 64
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y1]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = add i8 %xx, %z
+  %yz = and i8 %y, 63
+  %y1 = or i8 %yz, 64
+  %xor = xor i8 %x, %y1
+  %r = icmp ugt i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_ult(i8 %x) {
+; CHECK-LABEL: @xor_ult(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, 123
+  %r = icmp ult i8 %xor, %x
+  ret i1 %r
+}
+
+define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @xor_sgt(
+; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], <i8 31, i8 31>
+; CHECK-NEXT:    [[Y1:%.*]] = or <2 x i8> [[YZ]], <i8 64, i8 64>
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %yz = and <2 x i8> %y, <i8 31, i8 31>
+  %y1 = or <2 x i8> %yz, <i8 64, i8 64>
+  %xor = xor <2 x i8> %x, %y1
+  %r = icmp sgt <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @xor_sgt_fail_no_known_msb(
+; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], <i8 55, i8 55>
+; CHECK-NEXT:    [[Y1:%.*]] = or <2 x i8> [[YZ]], <i8 8, i8 8>
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %yz = and <2 x i8> %y, <i8 63, i8 63>
+  %y1 = or <2 x i8> %yz, <i8 8, i8 8>
+  %xor = xor <2 x i8> %x, %y1
+  %r = icmp sgt <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define i1 @xor_slt_2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_slt_2(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 88
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, 88
+  %r = icmp slt i8 %x, %xor
+  ret i1 %r
+}
+
+define <2 x i1> @xor_sgt_intmin_2(<2 x i8> %xx, <2 x i8> %yy, <2 x i8> %z) {
+; CHECK-LABEL: @xor_sgt_intmin_2(
+; CHECK-NEXT:    [[X:%.*]] = add <2 x i8> [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]], <i8 -128, i8 -128>
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[X]], [[XOR]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %x = add <2 x i8> %xx, %z
+  %y = or <2 x i8> %yy, <i8 128, i8 128>
+  %xor = xor <2 x i8> %x, %y
+  %r = icmp sgt <2 x i8> %x, %xor
+  ret <2 x i1> %r
+}
+
+define i1 @or_slt_intmin_indirect(i8 %x, i8 %C) {
+; CHECK-LABEL: @or_slt_intmin_indirect(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[C:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[NEG:%.*]], label [[POS:%.*]]
+; CHECK:       common.ret:
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i1 [ [[R:%.*]], [[NEG]] ], [ false, [[POS]] ]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+; CHECK:       neg:
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[C]], [[X:%.*]]
+; CHECK-NEXT:    [[R]] = icmp slt i8 [[XOR]], [[X]]
+; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
+; CHECK:       pos:
+; CHECK-NEXT:    tail call void @barrier()
+; CHECK-NEXT:    br label [[COMMON_RET]]
+;
+  %cmp = icmp slt i8 %C, 0
+  br i1 %cmp, label %neg, label %pos
+common.ret:
+  %common.ret.op = phi i1 [ %r, %neg ], [ false, %pos ]
+  ret i1 %common.ret.op
+neg:
+  %xor = xor i8 %C, %x
+  %r = icmp slt i8 %xor, %x
+  br label %common.ret
+pos:
+  tail call void @barrier()
+  br label %common.ret
+}
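
For the unsigned side of the argument: once %y is known non-zero (as the assume in xor_uge above establishes), %xor and %x are guaranteed unequal, which is why xor_uge is expected to tighten uge to ugt while xor_uge_fail_maybe_zero must be left alone. A standalone sketch under that assumption, again with a hypothetical function name:

    declare void @llvm.assume(i1)

    ; With %y != 0 guaranteed by the assume, %xor can never equal %x, so
    ; the non-strict ule is equivalent to the strict ult (and uge to ugt).
    define i1 @sketch_ule_equals_ult(i8 %x, i8 %y) {
      %ynz = icmp ne i8 %y, 0
      call void @llvm.assume(i1 %ynz)
      %xor = xor i8 %x, %y
      %r = icmp ule i8 %xor, %x
      ret i1 %r
    }

Since this commit is NFC, the autogenerated CHECK lines above largely restate the input IR (modulo existing canonicalizations); they will be regenerated with utils/update_test_checks.py once the fold from D144607 lands.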
