[llvm] r368207 - [X86] Add test cases for missed opportunities to use pack instructions for 512->256 truncates with prefer-vector-width=256

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 7 13:54:47 PDT 2019


Author: ctopper
Date: Wed Aug  7 13:54:46 2019
New Revision: 368207

URL: http://llvm.org/viewvc/llvm-project?rev=368207&view=rev
Log:
[X86] Add test cases for missed opportunities to use pack instructions for 512->256 truncates with prefer-vector-width=256

If the 512-bit vectors are going to be split anyway, then we are
better off using pack instructions to implicitly concatenate the
256-bit pieces.
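
As an illustrative sketch (not output from this commit; the exact
sequence is up to whatever lowering exploits this), a pack-based
version of the v16i32->v16i16 zeroes case below could look roughly
like:

  vpsrld    $16, 32(%rdi), %ymm1
  vpsrld    $16, (%rdi), %ymm0
  vpackusdw %ymm1, %ymm0, %ymm0  # pack both halves; 128-bit lanes interleave
  vpermq    $0xd8, %ymm0, %ymm0  # restore element order across lanes
  retq

The shift leaves every 32-bit element within unsigned 16-bit range, so
the saturating vpackusdw is an exact truncation. The sign-extended
variants could use vpackssdw the same way, and the word-to-byte cases
vpackuswb/vpacksswb. There is no qword-to-dword pack instruction, so
the v8i64->v8i32 cases would still need vpmovqd for the narrowing
itself.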

Modified:
    llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll

Modified: llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll?rev=368207&r1=368206&r2=368207&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll (original)
+++ llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll Wed Aug  7 13:54:46 2019
@@ -751,3 +751,93 @@ define <8 x i16> @trunc_v8i64_v8i16(<8 x
   %b = trunc <8 x i64> %a to <8 x i16>
   ret <8 x i16> %b
 }
+
+define <8 x i32> @trunc_v8i64_v8i32_zeroes(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v8i64_v8i32_zeroes:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlq $48, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsrlq $48, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovqd %ymm1, %xmm1
+; CHECK-NEXT:    vpmovqd %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <8 x i64>, <8 x i64>* %x
+  %b = lshr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
+  %c = trunc <8 x i64> %b to <8 x i32>
+  ret <8 x i32> %c
+}
+
+define <16 x i16> @trunc_v16i32_v16i16_zeroes(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v16i32_v16i16_zeroes:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrld $16, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsrld $16, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovdw %ymm1, %xmm1
+; CHECK-NEXT:    vpmovdw %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <16 x i32>, <16 x i32>* %x
+  %b = lshr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %c = trunc <16 x i32> %b to <16 x i16>
+  ret <16 x i16> %c
+}
+
+define <32 x i8> @trunc_v32i16_v32i8_zeroes(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v32i16_v32i8_zeroes:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlw $8, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsrlw $8, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovwb %ymm1, %xmm1
+; CHECK-NEXT:    vpmovwb %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <32 x i16>, <32 x i16>* %x
+  %b = lshr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %c = trunc <32 x i16> %b to <32 x i8>
+  ret <32 x i8> %c
+}
+
+define <8 x i32> @trunc_v8i64_v8i32_sign(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v8i64_v8i32_sign:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsraq $48, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsraq $48, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovqd %ymm1, %xmm1
+; CHECK-NEXT:    vpmovqd %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <8 x i64>, <8 x i64>* %x
+  %b = ashr <8 x i64> %a, <i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48, i64 48>
+  %c = trunc <8 x i64> %b to <8 x i32>
+  ret <8 x i32> %c
+}
+
+define <16 x i16> @trunc_v16i32_v16i16_sign(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v16i32_v16i16_sign:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrad $16, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsrad $16, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovdw %ymm1, %xmm1
+; CHECK-NEXT:    vpmovdw %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <16 x i32>, <16 x i32>* %x
+  %b = ashr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %c = trunc <16 x i32> %b to <16 x i16>
+  ret <16 x i16> %c
+}
+
+define <32 x i8> @trunc_v32i16_v32i8_sign(<32 x i16>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v32i16_v32i8_sign:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsraw $8, 32(%rdi), %ymm0
+; CHECK-NEXT:    vpsraw $8, (%rdi), %ymm1
+; CHECK-NEXT:    vpmovwb %ymm1, %xmm1
+; CHECK-NEXT:    vpmovwb %ymm0, %xmm0
+; CHECK-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %a = load <32 x i16>, <32 x i16>* %x
+  %b = ashr <32 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  %c = trunc <32 x i16> %b to <32 x i8>
+  ret <32 x i8> %c
+}
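
To reproduce the current codegen locally, an invocation along these
lines should work (the file's actual RUN line may spell the target
attributes slightly differently):

  llc -mtriple=x86_64-unknown-unknown \
      -mattr=+avx512vl,+avx512bw,+avx512dq,+prefer-256-bit \
      llvm/test/CodeGen/X86/min-legal-vector-width.ll -o -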

More information about the llvm-commits mailing list