[llvm] r287504 - [X86][SSE] Add some initial combine tests that could (should?) use PACKSS

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 20 13:12:50 PST 2016


Author: rksimon
Date: Sun Nov 20 15:12:49 2016
New Revision: 287504

URL: http://llvm.org/viewvc/llvm-project?rev=287504&view=rev
Log:
[X86][SSE] Add some initial combine tests that could (should?) use PACKSS 
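
For context: PACKSSDW packs the signed i32 lanes of its two sources into i16
lanes with signed saturation, so lanes that are already 0 or -1 (the only
values an ashr-by-31 can produce) pack losslessly. As a purely illustrative
sketch (not output from this commit), the trunc_ashr_v8i32 case below could
in principle lower on x86-64 SSE2 to just:

    psrad $31, %xmm0
    psrad $31, %xmm1
    packssdw %xmm1, %xmm0
    retq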

Added:
    llvm/trunk/test/CodeGen/X86/packss.ll

Added: llvm/trunk/test/CodeGen/X86/packss.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/packss.ll?rev=287504&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/packss.ll (added)
+++ llvm/trunk/test/CodeGen/X86/packss.ll Sun Nov 20 15:12:49 2016
@@ -0,0 +1,147 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX2
+
+define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
+; X32-SSE-LABEL: trunc_ashr_v4i64:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    psrad $31, %xmm1
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X32-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: trunc_ashr_v4i64:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    psrad $31, %xmm1
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; X64-SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: trunc_ashr_v4i64:
+; X64-AVX1:       # BB#0:
+; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT:    vzeroupper
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: trunc_ashr_v4i64:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; X64-AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    retq
+  %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
+  %2 = trunc <4 x i64> %1 to <4 x i32>
+  ret <4 x i32> %2
+}
+
+define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
+; X32-SSE-LABEL: trunc_ashr_v8i32:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    psrad $31, %xmm1
+; X32-SSE-NEXT:    pslld $16, %xmm1
+; X32-SSE-NEXT:    psrad $16, %xmm1
+; X32-SSE-NEXT:    pslld $16, %xmm0
+; X32-SSE-NEXT:    psrad $16, %xmm0
+; X32-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: trunc_ashr_v8i32:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    psrad $31, %xmm1
+; X64-SSE-NEXT:    pslld $16, %xmm1
+; X64-SSE-NEXT:    psrad $16, %xmm1
+; X64-SSE-NEXT:    pslld $16, %xmm0
+; X64-SSE-NEXT:    psrad $16, %xmm0
+; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: trunc_ashr_v8i32:
+; X64-AVX1:       # BB#0:
+; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X64-AVX1-NEXT:    vzeroupper
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: trunc_ashr_v8i32:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; X64-AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    retq
+  %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  %2 = trunc <8 x i32> %1 to <8 x i16>
+  ret <8 x i16> %2
+}
+
+define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
+; X32-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    psrad $31, %xmm0
+; X32-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT:    pslld $16, %xmm1
+; X32-SSE-NEXT:    psrad $16, %xmm1
+; X32-SSE-NEXT:    pslld $16, %xmm0
+; X32-SSE-NEXT:    psrad $16, %xmm0
+; X32-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    psrad $31, %xmm0
+; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
+; X64-SSE-NEXT:    pslld $16, %xmm1
+; X64-SSE-NEXT:    psrad $16, %xmm1
+; X64-SSE-NEXT:    pslld $16, %xmm0
+; X64-SSE-NEXT:    psrad $16, %xmm0
+; X64-SSE-NEXT:    packssdw %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X64-AVX1:       # BB#0:
+; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX2-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X64-AVX2:       # BB#0:
+; X64-AVX2-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
+; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; X64-AVX2-NEXT:    # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    retq
+  %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
+  %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
+  %3 = sext <4 x i1> %2 to <4 x i32>
+  %4 = shufflevector <4 x i32> %1, <4 x i32> %3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %5 = trunc <8 x i32> %4 to <8 x i16>
+  ret <8 x i16> %5
+}
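
Similarly (again only a sketch of the hoped-for codegen, not part of the
committed checks), the AVX1 version of trunc_ashr_v8i32 could drop the
vpshufb mask load from the constant pool in favour of a single cross-half
pack:

    vpsrad $31, %xmm0, %xmm1
    vextractf128 $1, %ymm0, %xmm0
    vpsrad $31, %xmm0, %xmm0
    vpackssdw %xmm0, %xmm1, %xmm0
    vzeroupper
    retq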