[llvm] c0a1f46 - [X86] Add packus.ll test coverage

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 21 05:38:50 PDT 2023


Author: Simon Pilgrim
Date: 2023-07-21T13:32:04+01:00
New Revision: c0a1f4624be917fa64747286fb6c65a9262fd80b

URL: https://github.com/llvm/llvm-project/commit/c0a1f4624be917fa64747286fb6c65a9262fd80b
DIFF: https://github.com/llvm/llvm-project/commit/c0a1f4624be917fa64747286fb6c65a9262fd80b.diff

LOG: [X86] Add packus.ll test coverage

Similar to the existing packss.ll tests
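
For context, the idiom under test: a trunc (or a truncating shuffle) can be
lowered to the unsigned-saturating PACKUSWB/PACKUSDW instructions whenever
the inputs are already known to fit the narrower unsigned range, e.g. after
a logical shift right. A minimal sketch of the pattern (illustrative only,
not taken from the new file; the function name is made up):

define <8 x i16> @packus_idiom(<8 x i32> %a) {
  ; The logical shift clears the upper 16 bits of every element, so all
  ; values fit the unsigned 16-bit range and an unsigned pack is exact.
  %shr = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %trunc = trunc <8 x i32> %shr to <8 x i16>
  ret <8 x i16> %trunc
}

Under -mattr=+sse4.2 this is expected to select PSRLD shifts feeding a
single PACKUSDW, mirroring the trunc_lshr_v8i32 test below.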

Added: 
    llvm/test/CodeGen/X86/packus.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/packus.ll b/llvm/test/CodeGen/X86/packus.ll
new file mode 100644
index 00000000000000..c1c4cc5b7b0f28
--- /dev/null
+++ b/llvm/test/CodeGen/X86/packus.ll
@@ -0,0 +1,456 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2     | FileCheck %s --check-prefixes=SSE,SSE2,X86-SSE,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2   | FileCheck %s --check-prefixes=SSE,SSE2,X64-SSE,X64-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2   | FileCheck %s --check-prefixes=SSE,SSE4,X86-SSE,X86-SSE4
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE4,X64-SSE,X64-SSE4
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1,X86-AVX,X86-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx    | FileCheck %s --check-prefixes=AVX,AVX1,X64-AVX,X64-AVX1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2,X86-AVX,X86-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2   | FileCheck %s --check-prefixes=AVX,AVX2,X64-AVX,X64-AVX2
+
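+; lshr by 63 leaves each i64 element as 0 or 1, so the trunc to i32 is
+; expected to lower to an unsigned pack rather than a full shuffle.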
+define <4 x i32> @trunc_lshr_v4i64(<4 x i64> %a) nounwind {
+; SSE2-LABEL: trunc_lshr_v4i64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrlq $63, %xmm1
+; SSE2-NEXT:    psrlq $63, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: trunc_lshr_v4i64:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    psrlq $63, %xmm1
+; SSE4-NEXT:    psrlq $63, %xmm0
+; SSE4-NEXT:    packusdw %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: trunc_lshr_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: trunc_lshr_v4i64:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $63, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = lshr <4 x i64> %a, <i64 63, i64 63, i64 63, i64 63>
+  %2 = trunc <4 x i64> %1 to <4 x i32>
+  ret <4 x i32> %2
+}
+
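+; lshr by 49 leaves at most 15 significant bits per i64 element, so after
+; the bitcast every i32 fits both the signed and unsigned 16-bit ranges.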
+define <8 x i16> @trunc_lshr_v4i64_bitcast(<4 x i64> %a0) {
+; SSE2-LABEL: trunc_lshr_v4i64_bitcast:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrlq $49, %xmm1
+; SSE2-NEXT:    psrlq $49, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: trunc_lshr_v4i64_bitcast:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    psrlq $49, %xmm1
+; SSE4-NEXT:    psrlq $49, %xmm0
+; SSE4-NEXT:    packusdw %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: trunc_lshr_v4i64_bitcast:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrlq $49, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $49, %xmm0, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: trunc_lshr_v4i64_bitcast:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrlq $49, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = lshr <4 x i64> %a0, <i64 49, i64 49, i64 49, i64 49>
+  %2 = bitcast <4 x i64> %1 to <8 x i32>
+  %3 = trunc <8 x i32> %2 to <8 x i16>
+  ret <8 x i16> %3
+}
+
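+; psrld by 31 leaves 0 or 1 per i32 element, comfortably within the
+; unsigned 16-bit range for the trunc.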
+define <8 x i16> @trunc_lshr_v8i32(<8 x i32> %a) nounwind {
+; SSE2-LABEL: trunc_lshr_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrld $31, %xmm1
+; SSE2-NEXT:    psrld $31, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: trunc_lshr_v8i32:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    psrld $31, %xmm1
+; SSE4-NEXT:    psrld $31, %xmm0
+; SSE4-NEXT:    packusdw %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: trunc_lshr_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
+; AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: trunc_lshr_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpsrld $31, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = lshr <8 x i32> %a, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  %2 = trunc <8 x i32> %1 to <8 x i16>
+  ret <8 x i16> %2
+}
+
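+; The shl/lshr pair only masks elements 0 and 2, and the shuffle only
+; reads the low i32 of each 128-bit half, exercising demanded-elts
+; analysis around the pack.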
+define <8 x i16> @trunc_lshr_v4i64_demandedelts(<4 x i64> %a0) {
+; X86-SSE2-LABEL: trunc_lshr_v4i64_demandedelts:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1]
+; X86-SSE2-NEXT:    pand %xmm2, %xmm1
+; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-SSE2-NEXT:    pand %xmm2, %xmm0
+; X86-SSE2-NEXT:    packuswb %xmm1, %xmm0
+; X86-SSE2-NEXT:    retl
+;
+; X64-SSE2-LABEL: trunc_lshr_v4i64_demandedelts:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,18446744073709551615]
+; X64-SSE2-NEXT:    pand %xmm2, %xmm0
+; X64-SSE2-NEXT:    pand %xmm2, %xmm1
+; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-SSE2-NEXT:    packuswb %xmm1, %xmm0
+; X64-SSE2-NEXT:    retq
+;
+; X86-SSE4-LABEL: trunc_lshr_v4i64_demandedelts:
+; X86-SSE4:       # %bb.0:
+; X86-SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X86-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1]
+; X86-SSE4-NEXT:    pand %xmm2, %xmm1
+; X86-SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-SSE4-NEXT:    pand %xmm2, %xmm0
+; X86-SSE4-NEXT:    packusdw %xmm1, %xmm0
+; X86-SSE4-NEXT:    retl
+;
+; X64-SSE4-LABEL: trunc_lshr_v4i64_demandedelts:
+; X64-SSE4:       # %bb.0:
+; X64-SSE4-NEXT:    movdqa {{.*#+}} xmm2 = [1,18446744073709551615]
+; X64-SSE4-NEXT:    pand %xmm2, %xmm0
+; X64-SSE4-NEXT:    pand %xmm2, %xmm1
+; X64-SSE4-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; X64-SSE4-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-SSE4-NEXT:    packusdw %xmm1, %xmm0
+; X64-SSE4-NEXT:    retq
+;
+; X86-AVX1-LABEL: trunc_lshr_v4i64_demandedelts:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vzeroupper
+; X86-AVX1-NEXT:    retl
+;
+; X64-AVX1-LABEL: trunc_lshr_v4i64_demandedelts:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vzeroupper
+; X64-AVX1-NEXT:    retq
+;
+; AVX2-LABEL: trunc_lshr_v4i64_demandedelts:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = shl <4 x i64> %a0, <i64 63, i64 0, i64 63, i64 0>
+  %2 = lshr <4 x i64> %1, <i64 63, i64 0, i64 63, i64 0>
+  %3 = bitcast <4 x i64> %2 to <8 x i32>
+  %4 = shufflevector <8 x i32> %3, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+  %5 = trunc <8 x i32> %4 to <8 x i16>
+  ret <8 x i16> %5
+}
+
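+; Taking the even bytes of two word vectors whose values fit in a byte is
+; the canonical PACKUSWB pattern.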
+define <16 x i8> @shuffle_lshr_2v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: shuffle_lshr_2v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    psrlw $15, %xmm1
+; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: shuffle_lshr_2v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $15, %xmm1, %xmm1
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
+  %lshr0 = lshr <8 x i16> %a0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  %lshr1 = lshr <8 x i16> %a1, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  %bc0 = bitcast <8 x i16> %lshr0 to <16 x i8>
+  %bc1 = bitcast <8 x i16> %lshr1 to <16 x i8>
+  %res = shufflevector <16 x i8> %bc0, <16 x i8> %bc1, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+  ret <16 x i8> %res
+}
+
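+; The same even-element pattern at i32->i16 width maps to PACKUSDW; SSE2
+; lacks that instruction but can fall back to PACKSSDW since the values
+; are only 0 or 1.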
+define <8 x i16> @shuffle_lshr_2v4i32(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE2-LABEL: shuffle_lshr_2v4i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    psrld $31, %xmm0
+; SSE2-NEXT:    psrld $31, %xmm1
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    ret{{[l|q]}}
+;
+; SSE4-LABEL: shuffle_lshr_2v4i32:
+; SSE4:       # %bb.0:
+; SSE4-NEXT:    psrld $31, %xmm0
+; SSE4-NEXT:    psrld $31, %xmm1
+; SSE4-NEXT:    packusdw %xmm1, %xmm0
+; SSE4-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: shuffle_lshr_2v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrld $31, %xmm0, %xmm0
+; AVX-NEXT:    vpsrld $31, %xmm1, %xmm1
+; AVX-NEXT:    vpackusdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
+  %lshr0 = lshr <4 x i32> %a0, <i32 31, i32 31, i32 31, i32 31>
+  %lshr1 = lshr <4 x i32> %a1, <i32 31, i32 31, i32 31, i32 31>
+  %bc0 = bitcast <4 x i32> %lshr0 to <8 x i16>
+  %bc1 = bitcast <4 x i32> %lshr1 to <8 x i16>
+  %res = shufflevector <8 x i16> %bc0, <8 x i16> %bc1, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  ret <8 x i16> %res
+}
+
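+; No pack instruction exists for i64->i32, but with elements of 0 or 1 the
+; even-element shuffle can still be lowered through PACKSSDW.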
+define <4 x i32> @shuffle_lshr_2v2i64(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: shuffle_lshr_2v2i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $63, %xmm0
+; SSE-NEXT:    psrlq $63, %xmm1
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: shuffle_lshr_2v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $63, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlq $63, %xmm1, %xmm1
+; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
+  %lshr0 = lshr <2 x i64> %a0, <i64 63, i64 63>
+  %lshr1 = lshr <2 x i64> %a1, <i64 63, i64 63>
+  %bc0 = bitcast <2 x i64> %lshr0 to <4 x i32>
+  %bc1 = bitcast <2 x i64> %lshr1 to <4 x i32>
+  %res = shufflevector <4 x i32> %bc0, <4 x i32> %bc1, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  ret <4 x i32> %res
+}
+
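+; As above, except the shuffle is performed on the values bitcast to
+; <4 x float>; the integer pack is still recognised through the float
+; shuffle.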
+define <4 x float> @shuffle_lshr_2v2i64_bitcast(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: shuffle_lshr_2v2i64_bitcast:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlq $63, %xmm0
+; SSE-NEXT:    psrlq $63, %xmm1
+; SSE-NEXT:    packssdw %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: shuffle_lshr_2v2i64_bitcast:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlq $63, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlq $63, %xmm1, %xmm1
+; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
+  %lshr0 = lshr <2 x i64> %a0, <i64 63, i64 63>
+  %lshr1 = lshr <2 x i64> %a1, <i64 63, i64 63>
+  %bc0 = bitcast <2 x i64> %lshr0 to <4 x float>
+  %bc1 = bitcast <2 x i64> %lshr1 to <4 x float>
+  %res = shufflevector <4 x float> %bc0, <4 x float> %bc1, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  ret <4 x float> %res
+}
+
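+; A compare-against-zero result zext'd to i8 and concatenated with zeros;
+; note the current codegen goes through PACKSSWB plus a mask rather than a
+; single PACKUSWB.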
+define <16 x i8> @packuswb_icmp_zero_128(<8 x i16> %a0) {
+; X86-SSE-LABEL: packuswb_icmp_zero_128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pxor %xmm1, %xmm1
+; X86-SSE-NEXT:    pcmpeqw %xmm0, %xmm1
+; X86-SSE-NEXT:    packsswb %xmm1, %xmm1
+; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
+; X86-SSE-NEXT:    retl
+;
+; X64-SSE-LABEL: packuswb_icmp_zero_128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pxor %xmm1, %xmm1
+; X64-SSE-NEXT:    pcmpeqw %xmm0, %xmm1
+; X64-SSE-NEXT:    packsswb %xmm1, %xmm1
+; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; X64-SSE-NEXT:    movq {{.*#+}} xmm0 = xmm1[0],zero
+; X64-SSE-NEXT:    retq
+;
+; X86-AVX-LABEL: packuswb_icmp_zero_128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; X86-AVX-NEXT:    retl
+;
+; X64-AVX-LABEL: packuswb_icmp_zero_128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; X64-AVX-NEXT:    retq
+  %1 = icmp eq <8 x i16> %a0, zeroinitializer
+  %2 = zext <8 x i1> %1 to <8 x i8>
+  %3 = shufflevector <8 x i8> %2, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %3
+}
+
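+; The same boolean result reached via a trunc of the widened shuffle; here
+; the PACKUSWB does form directly.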
+define <16 x i8> @packuswb_icmp_zero_trunc_128(<8 x i16> %a0) {
+; SSE-LABEL: packuswb_icmp_zero_trunc_128:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm1, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm1, %xmm0
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    packuswb %xmm1, %xmm0
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX-LABEL: packuswb_icmp_zero_trunc_128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    ret{{[l|q]}}
+  %1 = icmp eq <8 x i16> %a0, zeroinitializer
+  %2 = zext <8 x i1> %1 to <8 x i16>
+  %3 = shufflevector <8 x i16> %2, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %4 = trunc <16 x i16> %3 to <16 x i8>
+  ret <16 x i8> %4
+}
+
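+; 256-bit variant: the interleaved shuffle with zeroinitializer matches
+; the per-128-bit-lane behaviour of (V)PACKUSWB.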
+define <32 x i8> @packuswb_icmp_zero_256(<16 x i16> %a0) {
+; SSE-LABEL: packuswb_icmp_zero_256:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
+; SSE-NEXT:    psrlw $15, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    packuswb %xmm0, %xmm3
+; SSE-NEXT:    packuswb %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; X86-AVX1-LABEL: packuswb_icmp_zero_256:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X86-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
+; X86-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
+; X86-AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; X86-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-AVX1-NEXT:    retl
+;
+; X64-AVX1-LABEL: packuswb_icmp_zero_256:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpackuswb %xmm1, %xmm2, %xmm1
+; X64-AVX1-NEXT:    vpackuswb %xmm0, %xmm2, %xmm0
+; X64-AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-AVX1-NEXT:    retq
+;
+; AVX2-LABEL: packuswb_icmp_zero_256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = icmp eq <16 x i16> %a0, zeroinitializer
+  %2 = zext <16 x i1> %1 to <16 x i16>
+  %3 = bitcast <16 x i16> %2 to <32 x i8>
+  %4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+  ret <32 x i8> %4
+}
+
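+; As above, but expressed as a trunc of a <32 x i16> concatenation that
+; also respects the 128-bit lane boundaries of (V)PACKUSWB.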
+define <32 x i8> @packuswb_icmp_zero_trunc_256(<16 x i16> %a0) {
+; SSE-LABEL: packuswb_icmp_zero_trunc_256:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm1
+; SSE-NEXT:    psrlw $15, %xmm1
+; SSE-NEXT:    pcmpeqw %xmm2, %xmm0
+; SSE-NEXT:    psrlw $15, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    packuswb %xmm0, %xmm3
+; SSE-NEXT:    packuswb %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm3, %xmm0
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    ret{{[l|q]}}
+;
+; AVX1-LABEL: packuswb_icmp_zero_trunc_256:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsrlw $15, %xmm2, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    ret{{[l|q]}}
+;
+; AVX2-LABEL: packuswb_icmp_zero_trunc_256:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $15, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    ret{{[l|q]}}
+  %1 = icmp eq <16 x i16> %a0, zeroinitializer
+  %2 = zext <16 x i1> %1 to <16 x i16>
+  %3 = shufflevector <16 x i16> zeroinitializer, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %4 = trunc <32 x i16> %3 to <32 x i8>
+  ret <32 x i8> %4
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64-AVX2: {{.*}}
+; X86-AVX2: {{.*}}