[llvm] r367600 - [X86] Add some test cases for 512-bit truncate to 128-bits with min-legal-vector-width=0 and prefer-vector-width=256.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 1 11:48:57 PDT 2019
Author: ctopper
Date: Thu Aug 1 11:48:57 2019
New Revision: 367600
URL: http://llvm.org/viewvc/llvm-project?rev=367600&view=rev
Log:
[X86] Add some test cases for 512-bit truncate to 128-bits with min-legal-vector-width=0 and prefer-vector-width=256.
We currently split the 512-bit type, truncate each half to 128 bits,
concatenate them, and then truncate again. It would probably be better to
truncate each half to 64 bits and then concatenate the results
using vpunpcklqdq.
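For illustration, here is a rough hand-written sketch of what that suggested
lowering could look like for the v8i64 -> v8i16 case below, assuming vpmovqw
(the AVX512VL qword-to-word truncate) narrows each 256-bit half into the low
64 bits of an xmm register before vpunpcklqdq concatenates the two halves.
This is not current compiler output, just a sketch of the alternative
described above:

  vmovdqa (%rdi), %ymm0            # load low 256 bits: <4 x i64>
  vmovdqa 32(%rdi), %ymm1          # load high 256 bits: <4 x i64>
  vpmovqw %ymm0, %xmm0             # truncate to <4 x i16> in the low 64 bits of xmm0
  vpmovqw %ymm1, %xmm1             # truncate to <4 x i16> in the low 64 bits of xmm1
  vpunpcklqdq %xmm1, %xmm0, %xmm0  # xmm0 = xmm0[0],xmm1[0] -> full <8 x i16>
  vzeroupper
  retq

Compared with the sequence the new tests currently check for, this would avoid
building a 256-bit intermediate and doing a second truncate, trading them for
a single 128-bit unpack.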
Modified:
llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll
Modified: llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll?rev=367600&r1=367599&r2=367600&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll (original)
+++ llvm/trunk/test/CodeGen/X86/min-legal-vector-width.ll Thu Aug 1 11:48:57 2019
@@ -719,3 +719,35 @@ define <4 x i32> @mload_v4i32(<4 x i32>
ret <4 x i32> %res
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+
+define <16 x i8> @trunc_v16i32_v16i8(<16 x i32>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v16i32_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovdqa (%rdi), %ymm0
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
+; CHECK-NEXT: vpmovdw %ymm0, %xmm0
+; CHECK-NEXT: vpmovdw %ymm1, %xmm1
+; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: vpmovwb %ymm0, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %a = load <16 x i32>, <16 x i32>* %x
+ %b = trunc <16 x i32> %a to <16 x i8>
+ ret <16 x i8> %b
+}
+
+define <8 x i16> @trunc_v8i64_v8i16(<8 x i64>* %x) nounwind "min-legal-vector-width"="256" {
+; CHECK-LABEL: trunc_v8i64_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovdqa (%rdi), %ymm0
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
+; CHECK-NEXT: vpmovqd %ymm0, %xmm0
+; CHECK-NEXT: vpmovqd %ymm1, %xmm1
+; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT: vpmovdw %ymm0, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+ %a = load <8 x i64>, <8 x i64>* %x
+ %b = trunc <8 x i64> %a to <8 x i16>
+ ret <8 x i16> %b
+}