[llvm] 455688e - [InstCombine] add tests for shifted xor; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 19 10:48:46 PDT 2020


Author: Sanjay Patel
Date: 2020-08-19T13:48:35-04:00
New Revision: 455688e293fa86d5e6de6f96740bc67dce34a179

URL: https://github.com/llvm/llvm-project/commit/455688e293fa86d5e6de6f96740bc67dce34a179
DIFF: https://github.com/llvm/llvm-project/commit/455688e293fa86d5e6de6f96740bc67dce34a179.diff

LOG: [InstCombine] add tests for shifted xor; NFC

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/and-xor-merge.ll
    llvm/test/Transforms/InstCombine/xor.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
index b9a6a536ce7c..3d86754a6635 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll
@@ -1,24 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
 ; (x&z) ^ (y&z) -> (x^y)&z
 define i32 @test1(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @test1(
-; CHECK-NEXT: %tmp61 = xor i32 %x, %y
-; CHECK-NEXT: %tmp7 = and i32 %tmp61, %z
-; CHECK-NEXT: ret i32 %tmp7
-        %tmp3 = and i32 %z, %x
-        %tmp6 = and i32 %z, %y
-        %tmp7 = xor i32 %tmp3, %tmp6
-        ret i32 %tmp7
+; CHECK-NEXT:    [[T61:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[T7:%.*]] = and i32 [[T61]], [[Z:%.*]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t3 = and i32 %z, %x
+  %t6 = and i32 %z, %y
+  %t7 = xor i32 %t3, %t6
+  ret i32 %t7
 }
 
 ; (x & y) ^ (x|y) -> x^y
 define i32 @test2(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @test2(
-; CHECK-NEXT: %tmp7 = xor i32 %y, %x
-; CHECK-NEXT: ret i32 %tmp7
-        %tmp3 = and i32 %y, %x
-        %tmp6 = or i32 %y, %x
-        %tmp7 = xor i32 %tmp3, %tmp6
-        ret i32 %tmp7
+; CHECK-NEXT:    [[T7:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT:    ret i32 [[T7]]
+;
+  %t3 = and i32 %y, %x
+  %t6 = or i32 %y, %x
+  %t7 = xor i32 %t3, %t6
+  ret i32 %t7
+}
+
+define i32 @PR38761(i32 %a, i32 %b) {
+; CHECK-LABEL: @PR38761(
+; CHECK-NEXT:    [[A_LOBIT:%.*]] = lshr i32 [[A:%.*]], 31
+; CHECK-NEXT:    [[A_LOBIT_NOT:%.*]] = xor i32 [[A_LOBIT]], 1
+; CHECK-NEXT:    [[B_LOBIT:%.*]] = lshr i32 [[B:%.*]], 31
+; CHECK-NEXT:    [[B_LOBIT_NOT:%.*]] = xor i32 [[B_LOBIT]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A_LOBIT_NOT]], [[B_LOBIT_NOT]]
+; CHECK-NEXT:    ret i32 [[AND]]
+;
+  %a.lobit = lshr i32 %a, 31
+  %a.lobit.not = xor i32 %a.lobit, 1
+  %b.lobit = lshr i32 %b, 31
+  %b.lobit.not = xor i32 %b.lobit, 1
+  %and = and i32 %b.lobit.not, %a.lobit.not
+  ret i32 %and
 }

diff --git a/llvm/test/Transforms/InstCombine/xor.ll b/llvm/test/Transforms/InstCombine/xor.ll
index c67a33dbd401..5db898552562 100644
--- a/llvm/test/Transforms/InstCombine/xor.ll
+++ b/llvm/test/Transforms/InstCombine/xor.ll
@@ -4,6 +4,8 @@
 @G1 = global i32 0
 @G2 = global i32 0
 
+declare void @use(i8)
+
 define i1 @test0(i1 %A) {
 ; CHECK-LABEL: @test0(
 ; CHECK-NEXT:    ret i1 [[A:%.*]]
@@ -1010,3 +1012,152 @@ define i32 @not_is_canonical(i32 %x, i32 %y) {
   %mul = shl i32 %add, 2
   ret i32 %mul
 }
+
+define i8 @not_shl(i8 %x) {
+; CHECK-LABEL: @not_shl(
+; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -128
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = shl i8 %x, 7
+  %r = xor i8 %a, 128
+  ret i8 %r
+}
+
+define <2 x i8> @not_shl_vec(<2 x i8> %x) {
+; CHECK-LABEL: @not_shl_vec(
+; CHECK-NEXT:    [[A:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 5>
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[A]], <i8 -32, i8 -32>
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a = shl <2 x i8> %x, <i8 5, i8 5>
+  %r = xor <2 x i8> %a, <i8 224, i8 224>
+  ret <2 x i8> %r
+}
+
+define i8 @not_shl_extra_use(i8 %x) {
+; CHECK-LABEL: @not_shl_extra_use(
+; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 7
+; CHECK-NEXT:    call void @use(i8 [[A]])
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -128
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = shl i8 %x, 7
+  call void @use(i8 %a)
+  %r = xor i8 %a, 128
+  ret i8 %r
+}
+
+define i8 @not_shl_wrong_const(i8 %x) {
+; CHECK-LABEL: @not_shl_wrong_const(
+; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -128
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = shl i8 %x, 6
+  %r = xor i8 %a, 128
+  ret i8 %r
+}
+
+define i8 @not_lshr(i8 %x) {
+; CHECK-LABEL: @not_lshr(
+; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = lshr i8 %x, 5
+  %r = xor i8 %a, 7
+  ret i8 %r
+}
+
+define <2 x i8> @not_lshr_vec(<2 x i8> %x) {
+; CHECK-LABEL: @not_lshr_vec(
+; CHECK-NEXT:    [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 7, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[A]], <i8 1, i8 1>
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a = lshr <2 x i8> %x, <i8 7, i8 7>
+  %r = xor <2 x i8> %a, <i8 1, i8 1>
+  ret <2 x i8> %r
+}
+
+define i8 @not_lshr_extra_use(i8 %x) {
+; CHECK-LABEL: @not_lshr_extra_use(
+; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 5
+; CHECK-NEXT:    call void @use(i8 [[A]])
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = lshr i8 %x, 5
+  call void @use(i8 %a)
+  %r = xor i8 %a, 7
+  ret i8 %r
+}
+
+define i8 @not_lshr_wrong_const(i8 %x) {
+; CHECK-LABEL: @not_lshr_wrong_const(
+; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], 3
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = lshr i8 %x, 5
+  %r = xor i8 %a, 3
+  ret i8 %r
+}
+
+define i8 @ashr_not(i8 %x) {
+; CHECK-LABEL: @ashr_not(
+; CHECK-NEXT:    [[N:%.*]] = ashr i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[N]], -1
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %n = xor i8 %x, -1
+  %r = ashr i8 %n, 5
+  ret i8 %r
+}
+
+define i8 @not_ashr(i8 %x) {
+; CHECK-LABEL: @not_ashr(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = ashr i8 %x, 5
+  %r = xor i8 %a, -1
+  ret i8 %r
+}
+
+define <2 x i8> @not_ashr_vec(<2 x i8> %x) {
+; CHECK-LABEL: @not_ashr_vec(
+; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[A]], <i8 -1, i8 -1>
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %a = ashr <2 x i8> %x, <i8 7, i8 7>
+  %r = xor <2 x i8> %a, <i8 -1, i8 -1>
+  ret <2 x i8> %r
+}
+
+define i8 @not_ashr_extra_use(i8 %x) {
+; CHECK-LABEL: @not_ashr_extra_use(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 5
+; CHECK-NEXT:    call void @use(i8 [[A]])
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = ashr i8 %x, 5
+  call void @use(i8 %a)
+  %r = xor i8 %a, -1
+  ret i8 %r
+}
+
+define i8 @not_ashr_wrong_const(i8 %x) {
+; CHECK-LABEL: @not_ashr_wrong_const(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 5
+; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], -2
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %a = ashr i8 %x, 5
+  %r = xor i8 %a, -2
+  ret i8 %r
+}


        


More information about the llvm-commits mailing list