[llvm] 8730ef9 - [InstCombine] add tests for inverted signbit splat mask; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 28 06:28:49 PDT 2022


Author: Sanjay Patel
Date: 2022-08-28T09:01:29-04:00
New Revision: 8730ef9ab33341d8358bb8b13bd7c5a6f8c2f3ab

URL: https://github.com/llvm/llvm-project/commit/8730ef9ab33341d8358bb8b13bd7c5a6f8c2f3ab
DIFF: https://github.com/llvm/llvm-project/commit/8730ef9ab33341d8358bb8b13bd7c5a6f8c2f3ab.diff

LOG: [InstCombine] add tests for inverted signbit splat mask; NFC
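For context (an illustration, not part of the commit): the pattern under test builds a splat of the sign bit of a narrow value, inverts it, widens it, and uses it to mask a wider value, which amounts to "keep %y when %x is non-negative, else 0". A minimal C sketch of the scalar semantics these tests pin down, with illustrative helper names, assuming arithmetic right shift for negative values (true of mainstream compilers, though implementation-defined in ISO C):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the IR in @invert_signbit_splat_mask below. */
    static int16_t mask_form(int8_t x, int16_t y) {
        int8_t a = (int8_t)(x >> 7); /* ashr i8 %x, 7 -> 0 or -1 (signbit splat) */
        int8_t n = (int8_t)~a;       /* xor i8 %a, -1 -> -1 or 0 (inverted mask) */
        int16_t s = (int16_t)n;      /* sext i8 %n to i16                        */
        return (int16_t)(s & y);     /* and i16 %s, %y                           */
    }

    /* The equivalent select: keep y when x is non-negative, else 0. */
    static int16_t select_form(int8_t x, int16_t y) {
        return x > -1 ? y : 0;
    }

    int main(void) {
        for (int x = -128; x <= 127; ++x)
            for (int y = -1000; y <= 1000; ++y)
                assert(mask_form((int8_t)x, (int16_t)y) ==
                       select_form((int8_t)x, (int16_t)y));
        return 0;
    }

The not_invert_* tests in the diff cover cases where this equivalence breaks: a zext instead of a sext, or a shift amount that does not isolate the sign bit.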

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/and.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index 2c0eed510a38e..dac4b07dcd95c 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -1854,6 +1854,119 @@ define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) {
   ret i8 %r
 }
 
+define i16 @invert_signbit_splat_mask(i8 %x, i16 %y) {
+; CHECK-LABEL: @invert_signbit_splat_mask(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %n = xor i8 %a, -1
+  %s = sext i8 %n to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define <2 x i16> @invert_signbit_splat_mask_commute(<2 x i5> %x, <2 x i16> %p) {
+; CHECK-LABEL: @invert_signbit_splat_mask_commute(
+; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i16> [[P:%.*]], [[P]]
+; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i5> [[X:%.*]], <i5 4, i5 poison>
+; CHECK-NEXT:    [[N:%.*]] = xor <2 x i5> [[A]], <i5 -1, i5 -1>
+; CHECK-NEXT:    [[S:%.*]] = sext <2 x i5> [[N]] to <2 x i16>
+; CHECK-NEXT:    [[R:%.*]] = and <2 x i16> [[Y]], [[S]]
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %y = mul <2 x i16> %p, %p ; thwart complexity-based canonicalization
+  %a = ashr <2 x i5> %x, <i5 4, i5 poison>
+  %n = xor <2 x i5> %a, <i5 -1, i5 -1>
+  %s = sext <2 x i5> %n to <2 x i16>
+  %r = and <2 x i16> %y, %s
+  ret <2 x i16> %r
+}
+
+define i16 @invert_signbit_splat_mask_use1(i8 %x, i16 %y) {
+; CHECK-LABEL: @invert_signbit_splat_mask_use1(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    call void @use8(i8 [[A]])
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  call void @use8(i8 %a)
+  %n = xor i8 %a, -1
+  %s = sext i8 %n to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define i16 @invert_signbit_splat_mask_use2(i8 %x, i16 %y) {
+; CHECK-LABEL: @invert_signbit_splat_mask_use2(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    call void @use8(i8 [[N]])
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %n = xor i8 %a, -1
+  call void @use8(i8 %n)
+  %s = sext i8 %n to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define i16 @invert_signbit_splat_mask_use3(i8 %x, i16 %y) {
+; CHECK-LABEL: @invert_signbit_splat_mask_use3(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT:    call void @use16(i16 [[S]])
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %n = xor i8 %a, -1
+  %s = sext i8 %n to i16
+  call void @use16(i16 %s)
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define i16 @not_invert_signbit_splat_mask1(i8 %x, i16 %y) {
+; CHECK-LABEL: @not_invert_signbit_splat_mask1(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[N]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %n = xor i8 %a, -1
+  %z = zext i8 %n to i16
+  %r = and i16 %z, %y
+  ret i16 %r
+}
+
+define i16 @not_invert_signbit_splat_mask2(i8 %x, i16 %y) {
+; CHECK-LABEL: @not_invert_signbit_splat_mask2(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 6
+  %n = xor i8 %a, -1
+  %s = sext i8 %n to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
 ; CTTZ(ShlC) < LShrC
 
 define i16 @shl_lshr_pow2_const_case1(i16 %x) {
