[llvm] r316354 - [X86][SSE] Regenerate PACKSS tests on 32 + 64-bit targets

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 23 10:50:40 PDT 2017


Author: rksimon
Date: Mon Oct 23 10:50:40 2017
New Revision: 316354

URL: http://llvm.org/viewvc/llvm-project?rev=316354&view=rev
Log:
[X86][SSE] Regenerate PACKSS tests on 32 + 64-bit targets
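
The checks in this test come from utils/update_llc_test_checks.py, as the
NOTE at the top of the file records. A minimal regeneration sketch, assuming
a local build whose llc lives at build/bin/llc (the build path here is
hypothetical, not part of this commit):

    python utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        test/CodeGen/X86/packss.ll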

Modified:
    llvm/trunk/test/CodeGen/X86/packss.ll

Modified: llvm/trunk/test/CodeGen/X86/packss.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/packss.ll?rev=316354&r1=316353&r2=316354&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/packss.ll (original)
+++ llvm/trunk/test/CodeGen/X86/packss.ll Mon Oct 23 10:50:40 2017
@@ -1,108 +1,101 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X86-SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X86-AVX --check-prefix=X86-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X86-AVX --check-prefix=X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefix=AVX --check-prefix=AVX1 --check-prefix=X64-AVX --check-prefix=X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=X64-AVX --check-prefix=X64-AVX2
 
 define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
-; X32-SSE-LABEL: trunc_ashr_v4i64:
-; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE-NEXT:    psrad $31, %xmm0
-; X32-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: trunc_ashr_v4i64:
-; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X64-SSE-NEXT:    psrad $31, %xmm0
-; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
-;
-; X64-AVX1-LABEL: trunc_ashr_v4i64:
-; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; X64-AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
-; X64-AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
-; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vzeroupper
-; X64-AVX1-NEXT:    retq
-;
-; X64-AVX2-LABEL: trunc_ashr_v4i64:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
-; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vzeroupper
-; X64-AVX2-NEXT:    retq
+; SSE-LABEL: trunc_ashr_v4i64:
+; SSE:       # BB#0:
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: trunc_ashr_v4i64:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: trunc_ashr_v4i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
   %1 = ashr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
   %2 = trunc <4 x i64> %1 to <4 x i32>
   ret <4 x i32> %2
 }
 
 define <8 x i16> @trunc_ashr_v8i32(<8 x i32> %a) nounwind {
-; X32-SSE-LABEL: trunc_ashr_v8i32:
-; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm1
-; X32-SSE-NEXT:    psrad $31, %xmm0
-; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: trunc_ashr_v8i32:
-; X64-SSE:       # BB#0:
-; X64-SSE-NEXT:    psrad $31, %xmm1
-; X64-SSE-NEXT:    psrad $31, %xmm0
-; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
-;
-; X64-AVX1-LABEL: trunc_ashr_v8i32:
-; X64-AVX1:       # BB#0:
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; X64-AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vzeroupper
-; X64-AVX1-NEXT:    retq
-;
-; X64-AVX2-LABEL: trunc_ashr_v8i32:
-; X64-AVX2:       # BB#0:
-; X64-AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
-; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT:    vzeroupper
-; X64-AVX2-NEXT:    retq
+; SSE-LABEL: trunc_ashr_v8i32:
+; SSE:       # BB#0:
+; SSE-NEXT:    psrad $31, %xmm1
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    packsswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: trunc_ashr_v8i32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: trunc_ashr_v8i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
   %1 = ashr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
   %2 = trunc <8 x i32> %1 to <8 x i16>
   ret <8 x i16> %2
 }
 
 define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
-; X32-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    psrad $31, %xmm0
-; X32-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
+; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X86-SSE:       # BB#0:
+; X86-SSE-NEXT:    psrad $31, %xmm0
+; X86-SSE-NEXT:    pcmpgtd {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT:    packsswb %xmm1, %xmm0
+; X86-SSE-NEXT:    ret{{[l|q]}}
+;
+; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X86-AVX:       # BB#0:
+; X86-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
+; X86-AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
+; X86-AVX-NEXT:    ret{{[l|q]}}
 ;
 ; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
 ; X64-SSE:       # BB#0:
 ; X64-SSE-NEXT:    psrad $31, %xmm0
 ; X64-SSE-NEXT:    pcmpgtd {{.*}}(%rip), %xmm1
 ; X64-SSE-NEXT:    packsswb %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-SSE-NEXT:    ret{{[l|q]}}
 ;
 ; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
 ; X64-AVX:       # BB#0:
 ; X64-AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
 ; X64-AVX-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT:    retq
+; X64-AVX-NEXT:    ret{{[l|q]}}
   %1 = ashr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
   %2 = icmp sgt <4 x i32> %b, <i32 1, i32 16, i32 255, i32 65535>
   %3 = sext <4 x i1> %2 to <4 x i32>
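
The shared SSE/AVX/AVX1/AVX2 blocks above can cover both triples because the
regenerated return check is a FileCheck regex: ret{{[l|q]}} matches the
32-bit retl and the 64-bit retq epilogues alike, while codegen that genuinely
differs per triple keeps its X86-*/X64-* prefixes. A minimal sketch of the
same pattern in isolation (function name and RUN lines hypothetical, not part
of this commit):

    ; RUN: llc < %s -mtriple=i686-unknown-unknown   | FileCheck %s
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
    define void @nop() nounwind {
    ; CHECK-LABEL: nop:
    ; CHECK: ret{{[l|q]}}
      ret void
    }

Note the '|' inside the character class is redundant ({{[lq]}} would match
the same instructions), but [l|q] is the exact form the update script emits.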