[llvm] fb11455 - [InstCombine][X86] Tests for variable but in-range vector-by-scalar shift amounts (PR40391)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 19 06:11:50 PDT 2020


Author: Simon Pilgrim
Date: 2020-03-19T13:11:06Z
New Revision: fb11455038ae62117b6039d7824b75f6927df79a

URL: https://github.com/llvm/llvm-project/commit/fb11455038ae62117b6039d7824b75f6927df79a
DIFF: https://github.com/llvm/llvm-project/commit/fb11455038ae62117b6039d7824b75f6927df79a.diff

LOG: [InstCombine][X86] Tests for variable but in-range vector-by-scalar shift amounts (PR40391)

These shifts are masked to be in range, so we should be able to replace them with generic shifts.
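
Illustrative sketch (not part of this commit): the fold these tests anticipate would rewrite a masked immediate-shift intrinsic into a generic IR shift by a splatted amount, since an in-range amount makes the target intrinsic equivalent to a plain vector ashr/lshr/shl. The function names below are made up for the example.

declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)

define <8 x i16> @masked_psrai_before(<8 x i16> %v, i32 %a) {
  ; shift amount masked into [0,15], so it is always in range for i16 lanes
  %amt = and i32 %a, 15
  %r = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %v, i32 %amt)
  ret <8 x i16> %r
}

define <8 x i16> @masked_psrai_after(<8 x i16> %v, i32 %a) {
  ; equivalent generic form: splat the masked amount and use a plain ashr
  %amt = and i32 %a, 15
  %amt16 = trunc i32 %amt to i16
  %ins = insertelement <8 x i16> undef, i16 %amt16, i32 0
  %splat = shufflevector <8 x i16> %ins, <8 x i16> undef, <8 x i32> zeroinitializer
  %r = ashr <8 x i16> %v, %splat
  ret <8 x i16> %r
}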

Added: 
    

Modified: 
    llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
index 39a8c3c92327..270cc37b7fd9 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
@@ -2678,6 +2678,105 @@ define <32 x i16> @avx512_psllv_w_512_undef(<32 x i16> %v) {
 ; Vector Masked Shift Amounts
 ;
 
+define <8 x i16> @sse2_psrai_w_128_masked(<8 x i16> %v, i32 %a) {
+; CHECK-LABEL: @sse2_psrai_w_128_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
+;
+  %1 = and i32 %a, 15
+  %2 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %v, i32 %1)
+  ret <8 x i16> %2
+}
+
+define <8 x i32> @avx2_psrai_d_256_masked(<8 x i32> %v, i32 %a) {
+; CHECK-LABEL: @avx2_psrai_d_256_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 31
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <8 x i32> [[TMP2]]
+;
+  %1 = and i32 %a, 31
+  %2 = tail call <8 x i32> @llvm.x86.avx2.psrai.d(<8 x i32> %v, i32 %1)
+  ret <8 x i32> %2
+}
+
+define <8 x i64> @avx512_psrai_q_512_masked(<8 x i64> %v, i32 %a) {
+; CHECK-LABEL: @avx512_psrai_q_512_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <8 x i64> [[TMP2]]
+;
+  %1 = and i32 %a, 63
+  %2 = tail call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %v, i32 %1)
+  ret <8 x i64> %2
+}
+
+define <4 x i32> @sse2_psrli_d_128_masked(<4 x i32> %v, i32 %a) {
+; CHECK-LABEL: @sse2_psrli_d_128_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 31
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+;
+  %1 = and i32 %a, 31
+  %2 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %v, i32 %1)
+  ret <4 x i32> %2
+}
+
+define <4 x i64> @avx2_psrli_q_256_masked(<4 x i64> %v, i32 %a) {
+; CHECK-LABEL: @avx2_psrli_q_256_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <4 x i64> [[TMP2]]
+;
+  %1 = and i32 %a, 63
+  %2 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %v, i32 %1)
+  ret <4 x i64> %2
+}
+
+define <32 x i16> @avx512_psrli_w_512_masked(<32 x i16> %v, i32 %a) {
+; CHECK-LABEL: @avx512_psrli_w_512_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <32 x i16> [[TMP2]]
+;
+  %1 = and i32 %a, 15
+  %2 = tail call <32 x i16> @llvm.x86.avx512.psrli.w.512(<32 x i16> %v, i32 %1)
+  ret <32 x i16> %2
+}
+
+define <2 x i64> @sse2_pslli_q_128_masked(<2 x i64> %v, i32 %a) {
+; CHECK-LABEL: @sse2_pslli_q_128_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
+;
+  %1 = and i32 %a, 63
+  %2 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %v, i32 %1)
+  ret <2 x i64> %2
+}
+
+define <16 x i16> @avx2_pslli_w_256_masked(<16 x i16> %v, i32 %a) {
+; CHECK-LABEL: @avx2_pslli_w_256_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <16 x i16> [[TMP2]]
+;
+  %1 = and i32 %a, 15
+  %2 = tail call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %v, i32 %1)
+  ret <16 x i16> %2
+}
+
+define <16 x i32> @avx512_pslli_d_512_masked(<16 x i32> %v, i32 %a) {
+; CHECK-LABEL: @avx512_pslli_d_512_masked(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 31
+; CHECK-NEXT:    [[TMP2:%.*]] = tail call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> [[V:%.*]], i32 [[TMP1]])
+; CHECK-NEXT:    ret <16 x i32> [[TMP2]]
+;
+  %1 = and i32 %a, 31
+  %2 = tail call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %v, i32 %1)
+  ret <16 x i32> %2
+}
+
 define <4 x i32> @avx2_psrav_d_128_masked(<4 x i32> %v, <4 x i32> %a) {
 ; CHECK-LABEL: @avx2_psrav_d_128_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <4 x i32> [[A:%.*]], <i32 31, i32 31, i32 31, i32 31>


        

