[llvm] [InstCombine] Fold xored one-complemented operand comparisons (PR #69882)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 26 00:55:50 PDT 2023


================
@@ -3,6 +3,285 @@
 
 declare void @llvm.assume(i1)
 declare void @barrier()
+declare void @use.i8(i8)
+
+; test for (~x ^ y) < ~z
+define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %nz = xor i8 %z, -1
+  %r = icmp slt i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test for ~z <= (x ^ ~y)
+define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor2(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %x
+  %r = icmp sle i8 %nz, %xor2
+  ret i1 %r
+}
+
+; test for ~z > (~x ^ y)
+define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor3(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sgt i8 %nz, %xor2
+  ret i1 %r
+}
+
+; tests for equality
+define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_ne(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp ne i8 %nz, %xor2
+  ret i1 %r
+}
+
+define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor_eq(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %y, -1
+  %xor2 = xor i8 %xor, %x
+  %r = icmp eq i8 %nz, %xor2
+  ret i1 %r
+}
+
+; other tests
+define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor4(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp sge i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor5(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ult i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor6(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ule i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor7(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp ugt i8 %xor2, %nz
+  ret i1 %r
+}
+
+define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor8(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = xor i8 %z, -1
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
+  %r = icmp uge i8 %xor2, %nz
+  ret i1 %r
+}
+
+; test (~a ^ b) < ~a
+define i1 @test_slt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_slt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp slt i32 %4, %3
+  ret i1 %5
+}
+
+; test (a ^ ~b) <= ~b
+define i1 @test_sle_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sle_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1:%.*]], [[TMP0:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sge i32 [[TMP3]], [[TMP1]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %1, -1
+  %4 = xor i32 %3, %0
+  %5 = icmp sle i32 %4, %3
+  ret i1 %5
+}
+
+; test ~a > (~a ^ b)
+define i1 @test_sgt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sgt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sgt i32 %3, %4
+  ret i1 %5
+}
+
+define i1 @test_sge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_sge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp sle i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp sge i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ult_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ult_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ult i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ule_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ule_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp uge i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ule i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_ugt_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_ugt_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp ugt i32 %4, %3
+  ret i1 %5
+}
+
+define i1 @test_uge_xor(i32 %0, i32 %1) {
+; CHECK-LABEL: @test_uge_xor(
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP0:%.*]], [[TMP1:%.*]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP4]]
+;
+  %3 = xor i32 %0, -1
+  %4 = xor i32 %3, %1
+  %5 = icmp uge i32 %4, %3
+  ret i1 %5
+}
+
+; Negative tests
+define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @test_xor1_nofold_multi_use(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
+; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
+; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, -1
+  call void @use.i8(i8 %xor)
+  %xor2 = xor i8 %xor, %y
----------------
nikic wrote:

The extra use needs to be on %xor2 rather than %xor; otherwise the fold still happens, so this isn't actually a negative test.
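
A minimal sketch of what that adjusted negative test could look like (hypothetical; the actual CHECK lines would be regenerated with update_test_checks.py):

define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
  %xor = xor i8 %x, -1
  %xor2 = xor i8 %xor, %y
  ; extra use on %xor2 (the xor feeding the icmp) is what should block the one-use fold
  call void @use.i8(i8 %xor2)
  %nz = xor i8 %z, -1
  %r = icmp slt i8 %xor2, %nz
  ret i1 %r
}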

https://github.com/llvm/llvm-project/pull/69882

