[llvm] 24aa302 - [X86][SSE] Add uniform vector shift test coverage for (sra (trunc (sr[al] x, c1)), c2) folds

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 18 03:38:53 PST 2021


Author: Simon Pilgrim
Date: 2021-02-18T11:38:41Z
New Revision: 24aa30254dceea3f55860f8cdcb6b65bf37a5176

URL: https://github.com/llvm/llvm-project/commit/24aa30254dceea3f55860f8cdcb6b65bf37a5176
DIFF: https://github.com/llvm/llvm-project/commit/24aa30254dceea3f55860f8cdcb6b65bf37a5176.diff

LOG: [X86][SSE] Add uniform vector shift test coverage for (sra (trunc (sr[al] x, c1)), c2) folds
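
For context, the fold these tests exercise rewrites a narrow shift of a truncated wide shift into a single wide shift. A minimal sketch in LLVM IR (constants mirror the new combine_vec_ashr_trunc_ashr_splat test below; the @src/@tgt function names are illustrative, not part of the patch):

    ; Before the fold: wide ashr by c1, trunc, then narrow ashr by c2.
    define <8 x i16> @src(<8 x i32> %x) {
      %s = ashr <8 x i32> %x, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
      %t = trunc <8 x i32> %s to <8 x i16>
      %r = ashr <8 x i16> %t, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
      ret <8 x i16> %r
    }

    ; After the fold: one wide ashr by c1 + c2 (16 + 3 = 19), then trunc.
    ; Valid here because c1 (16) equals the number of bits the trunc removes,
    ; so the narrow value's sign bit is the wide value's sign bit.
    define <8 x i16> @tgt(<8 x i32> %x) {
      %s = ashr <8 x i32> %x, <i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19, i32 19>
      %t = trunc <8 x i32> %s to <8 x i16>
      ret <8 x i16> %t
    }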

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-sra.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index e8dd6d0ef97f..465f10ce5e9e 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -226,6 +226,34 @@ define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
   ret <4 x i32> %3
 }
 
+define <16 x i8> @combine_vec_ashr_trunc_lshr_splat(<16 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_trunc_lshr_splat:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrad $26, %xmm3
+; SSE-NEXT:    psrad $26, %xmm2
+; SSE-NEXT:    packssdw %xmm3, %xmm2
+; SSE-NEXT:    psrad $26, %xmm1
+; SSE-NEXT:    psrad $26, %xmm0
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    packsswb %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ashr_trunc_lshr_splat:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrad $26, %ymm1, %ymm1
+; AVX-NEXT:    vpsrad $26, %ymm0, %ymm0
+; AVX-NEXT:    vpackssdw %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %1 = lshr <16 x i32> %x, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
+  %2 = trunc <16 x i32> %1 to <16 x i8>
+  %3 = ashr <16 x i8> %2, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
+  ret <16 x i8> %3
+}
+
 ; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
 ;      if c1 is equal to the number of bits the trunc removes
 define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
@@ -263,6 +291,27 @@ define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
   ret <4 x i32> %3
 }
 
+define <8 x i16> @combine_vec_ashr_trunc_ashr_splat(<8 x i32> %x) {
+; SSE-LABEL: combine_vec_ashr_trunc_ashr_splat:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrad $19, %xmm1
+; SSE-NEXT:    psrad $19, %xmm0
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_ashr_trunc_ashr_splat:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrad $19, %ymm0, %ymm0
+; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %1 = ashr <8 x i32> %x, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %2 = trunc <8 x i32> %1 to <8 x i16>
+  %3 = ashr <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %3
+}
+
 ; If the sign bit is known to be zero, switch this to a SRL.
 define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_ashr_positive:
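
As a sanity check on the CHECK lines: in the lshr-based splat test the trunc from <16 x i32> to <16 x i8> removes 24 bits and c1 is 24, so the combined shift amount is 24 + 2 = 26 (the psrad $26 lines); in the ashr-based splat test it is 16 + 3 = 19 (the psrad $19 lines).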
