[llvm] d88d279 - [NFC][InstCombine] Add tests for low bit splatting pattern (PR51305)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Sat Aug 7 07:33:07 PDT 2021


Author: Roman Lebedev
Date: 2021-08-07T17:25:28+03:00
New Revision: d88d279e765c31f2dcbf81256e75642ba3ad51f1

URL: https://github.com/llvm/llvm-project/commit/d88d279e765c31f2dcbf81256e75642ba3ad51f1
DIFF: https://github.com/llvm/llvm-project/commit/d88d279e765c31f2dcbf81256e75642ba3ad51f1.diff

LOG: [NFC][InstCombine] Add tests for low bit splatting pattern (PR51305)

Added: 
    llvm/test/Transforms/InstCombine/low-bit-splat.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/low-bit-splat.ll b/llvm/test/Transforms/InstCombine/low-bit-splat.ll
new file mode 100644
index 000000000000..ce891f7d03f1
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/low-bit-splat.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; PR51305: prefer `-(x & 1)` over `(x << (bitwidth(x)-1)) a>> (bitwidth(x)-1)`
+; as the pattern to splat the lowest bit.
+
+declare void @use8(i8)
+
+; Basic positive scalar tests
+define i8 @t0(i8 %x) {
+; CHECK-LABEL: @t0(
+; CHECK-NEXT:    [[I0:%.*]] = shl i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I0]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = shl i8 %x, 7
+  %r = ashr i8 %i0, 7
+  ret i8 %r
+}
+define i16 @t1_otherbitwidth(i16 %x) {
+; CHECK-LABEL: @t1_otherbitwidth(
+; CHECK-NEXT:    [[I0:%.*]] = shl i16 [[X:%.*]], 15
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i16 [[I0]], 15
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %i0 = shl i16 %x, 15
+  %r = ashr i16 %i0, 15
+  ret i16 %r
+}
+
+; Basic positive vector tests
+define <2 x i8> @t2_vec(<2 x i8> %x) {
+; CHECK-LABEL: @t2_vec(
+; CHECK-NEXT:    [[I0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 7, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = ashr exact <2 x i8> [[I0]], <i8 7, i8 7>
+; CHECK-NEXT:    ret <2 x i8> [[R]]
+;
+  %i0 = shl <2 x i8> %x, <i8 7, i8 7>
+  %r = ashr <2 x i8> %i0, <i8 7, i8 7>
+  ret <2 x i8> %r
+}
+define <3 x i8> @t3_vec_undef0(<3 x i8> %x) {
+; CHECK-LABEL: @t3_vec_undef0(
+; CHECK-NEXT:    [[I0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 7, i8 undef, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = ashr <3 x i8> [[I0]], <i8 7, i8 7, i8 7>
+; CHECK-NEXT:    ret <3 x i8> [[R]]
+;
+  %i0 = shl <3 x i8> %x, <i8 7, i8 undef, i8 7>
+  %r = ashr <3 x i8> %i0, <i8 7, i8 7, i8 7>
+  ret <3 x i8> %r
+}
+define <3 x i8> @t4_vec_undef1(<3 x i8> %x) {
+; CHECK-LABEL: @t4_vec_undef1(
+; CHECK-NEXT:    [[I0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 7, i8 7, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = ashr <3 x i8> [[I0]], <i8 7, i8 undef, i8 7>
+; CHECK-NEXT:    ret <3 x i8> [[R]]
+;
+  %i0 = shl <3 x i8> %x, <i8 7, i8 7, i8 7>
+  %r = ashr <3 x i8> %i0, <i8 7, i8 undef, i8 7>
+  ret <3 x i8> %r
+}
+define <3 x i8> @t5_vec_undef2(<3 x i8> %x) {
+; CHECK-LABEL: @t5_vec_undef2(
+; CHECK-NEXT:    [[I0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 7, i8 undef, i8 7>
+; CHECK-NEXT:    [[R:%.*]] = ashr <3 x i8> [[I0]], <i8 7, i8 undef, i8 7>
+; CHECK-NEXT:    ret <3 x i8> [[R]]
+;
+  %i0 = shl <3 x i8> %x, <i8 7, i8 undef, i8 7>
+  %r = ashr <3 x i8> %i0, <i8 7, i8 undef, i8 7>
+  ret <3 x i8> %r
+}
+
+; In general, the `shl` needs to go away.
+define i8 @n6_extrause(i8 %x) {
+; CHECK-LABEL: @n6_extrause(
+; CHECK-NEXT:    [[I0:%.*]] = shl i8 [[X:%.*]], 7
+; CHECK-NEXT:    call void @use8(i8 [[I0]])
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I0]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = shl i8 %x, 7
+  call void @use8(i8 %i0)
+  %r = ashr i8 %i0, 7
+  ret i8 %r
+}
+
+; But, if the input to the shift is already masked, then we're fine.
+define i8 @t7_already_masked(i8 %x) {
+; CHECK-LABEL: @t7_already_masked(
+; CHECK-NEXT:    [[I0:%.*]] = and i8 [[X:%.*]], 1
+; CHECK-NEXT:    call void @use8(i8 [[I0]])
+; CHECK-NEXT:    [[I1:%.*]] = shl i8 [[X]], 7
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I1]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = and i8 %x, 1
+  call void @use8(i8 %i0)
+  %i1 = shl i8 %i0, 7
+  %r = ashr i8 %i1, 7
+  ret i8 %r
+}
+define i8 @t8_already_masked_extrause(i8 %x) {
+; CHECK-LABEL: @t8_already_masked_extrause(
+; CHECK-NEXT:    [[I0:%.*]] = and i8 [[X:%.*]], 1
+; CHECK-NEXT:    call void @use8(i8 [[I0]])
+; CHECK-NEXT:    [[I1:%.*]] = shl i8 [[X]], 7
+; CHECK-NEXT:    call void @use8(i8 [[I1]])
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I1]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = and i8 %x, 1
+  call void @use8(i8 %i0)
+  %i1 = shl i8 %i0, 7
+  call void @use8(i8 %i1)
+  %r = ashr i8 %i1, 7
+  ret i8 %r
+}
+define i8 @n9_wrongly_masked_extrause(i8 %x) {
+; CHECK-LABEL: @n9_wrongly_masked_extrause(
+; CHECK-NEXT:    [[I0:%.*]] = and i8 [[X:%.*]], 3
+; CHECK-NEXT:    call void @use8(i8 [[I0]])
+; CHECK-NEXT:    [[I1:%.*]] = shl i8 [[X]], 7
+; CHECK-NEXT:    call void @use8(i8 [[I1]])
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I1]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = and i8 %x, 3
+  call void @use8(i8 %i0)
+  %i1 = shl i8 %i0, 7
+  call void @use8(i8 %i1)
+  %r = ashr i8 %i1, 7
+  ret i8 %r
+}
+
+; Wrong shift amounts
+define i8 @n10(i8 %x) {
+; CHECK-LABEL: @n10(
+; CHECK-NEXT:    [[I0:%.*]] = shl i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[R:%.*]] = ashr i8 [[I0]], 7
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = shl i8 %x, 6 ; not 7
+  %r = ashr i8 %i0, 7
+  ret i8 %r
+}
+define i8 @n11(i8 %x) {
+; CHECK-LABEL: @n11(
+; CHECK-NEXT:    [[I0:%.*]] = shl i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I0]], 6
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = shl i8 %x, 7
+  %r = ashr i8 %i0, 6 ; not 7
+  ret i8 %r
+}
+define i8 @n12(i8 %x) {
+; CHECK-LABEL: @n12(
+; CHECK-NEXT:    [[I0:%.*]] = shl i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[R:%.*]] = ashr exact i8 [[I0]], 6
+; CHECK-NEXT:    ret i8 [[R]]
+;
+  %i0 = shl i8 %x, 6 ; not 7
+  %r = ashr i8 %i0, 6 ; not 7
+  ret i8 %r
+}


        


More information about the llvm-commits mailing list