[llvm] dd7ba38 - [X86] matchTruncateWithPACK - consistently prefer shuffles for truncation to sub-64-bit vXi16
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 17 04:23:49 PDT 2023
Author: Simon Pilgrim
Date: 2023-08-17T12:23:28+01:00
New Revision: dd7ba3807865d41afccd2172c53999d5d13ceace
URL: https://github.com/llvm/llvm-project/commit/dd7ba3807865d41afccd2172c53999d5d13ceace
DIFF: https://github.com/llvm/llvm-project/commit/dd7ba3807865d41afccd2172c53999d5d13ceace.diff
LOG: [X86] matchTruncateWithPACK - consistently prefer shuffles for truncation to sub-64-bit vXi16
If we're truncating from v2i32 / v2i64, then PSHUFLW / PSHUFD+PSHUFLW should allow further shuffle combines more easily than a PACK chain will.
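For context, the smallest pattern this changes is a saturated v2i64 -> v2i16 truncation. A hypothetical reduced reproducer (not taken from the patch, but shaped like the vector-trunc-ssat.ll coverage below, using the smin/smax intrinsics) would be:

define <2 x i16> @trunc_ssat_v2i64_v2i16(<2 x i64> %a0) {
  ; Clamp to the signed i16 range, then truncate - the pattern that
  ; matchTruncateWithPACK would previously lower as two PACKSSDW stages.
  %min = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %a0, <2 x i64> <i64 32767, i64 32767>)
  %max = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %min, <2 x i64> <i64 -32768, i64 -32768>)
  %t = trunc <2 x i64> %max to <2 x i16>
  ret <2 x i16> %t
}
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)

Here NumStages = Log2(64/16) = 2 and the 128-bit source satisfies the new SrcVT.getSizeInBits() <= (64 * NumStages) bail-out (128 <= 128), so the truncation now lowers via PSHUFD+PSHUFLW; a 256-bit source such as v4i64 -> v4i16 (256 > 128) is not rejected by the new check and keeps the PACK lowering.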
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
llvm/test/CodeGen/X86/vector-trunc-packus.ll
llvm/test/CodeGen/X86/vector-trunc-ssat.ll
llvm/test/CodeGen/X86/vector-trunc-usat.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 85ccf62fe58f0b..4ff97905fe00ee 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -20134,12 +20134,12 @@ static SDValue matchTruncateWithPACK(unsigned &PackOpcode, EVT DstVT,
assert(SrcSVT.getSizeInBits() > DstSVT.getSizeInBits() && "Bad truncation");
unsigned NumStages = Log2_32(SrcSVT.getSizeInBits() / DstSVT.getSizeInBits());
- // Truncation to sub-128bit vXi32 can be better handled with shuffles.
- if (DstSVT == MVT::i32 && SrcVT.getSizeInBits() <= 128)
- return SDValue();
-
+ // Truncation from 128-bit to vXi32 can be better handled with PSHUFD.
+ // Truncation to sub-64-bit vXi16 can be better handled with PSHUFD/PSHUFLW.
// Truncation from v2i64 to v2i8 can be better handled with PSHUFB.
- if (DstVT == MVT::v2i8 && SrcVT == MVT::v2i64 && Subtarget.hasSSSE3())
+ if ((DstSVT == MVT::i32 && SrcVT.getSizeInBits() <= 128) ||
+ (DstSVT == MVT::i16 && SrcVT.getSizeInBits() <= (64 * NumStages)) ||
+ (DstVT == MVT::v2i8 && SrcVT == MVT::v2i64 && Subtarget.hasSSSE3()))
return SDValue();
// Prefer to lower v4i64 -> v4i32 as a shuffle unless we can cheaply
diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
index a5cc0018f49592..e07312c902d197 100644
--- a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
+++ b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
@@ -2524,15 +2524,15 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm0, %xmm4
; SSE2-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSE2-NEXT: por %xmm6, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: por %xmm5, %xmm0
-; SSE2-NEXT: packssdw %xmm0, %xmm0
-; SSE2-NEXT: packssdw %xmm0, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,2,2]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: por %xmm0, %xmm3
+; SSE2-NEXT: pand %xmm3, %xmm5
+; SSE2-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE2-NEXT: por %xmm5, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pcmpeqd %xmm2, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,0,3,2]
; SSE2-NEXT: pand %xmm1, %xmm2
@@ -2567,8 +2567,8 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; SSE4-NEXT: movapd %xmm4, %xmm0
; SSE4-NEXT: pcmpgtq %xmm2, %xmm0
; SSE4-NEXT: blendvpd %xmm0, %xmm4, %xmm2
-; SSE4-NEXT: packssdw %xmm2, %xmm2
-; SSE4-NEXT: packssdw %xmm2, %xmm2
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE4-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE4-NEXT: pcmpeqq %xmm1, %xmm3
; SSE4-NEXT: movmskpd %xmm3, %eax
; SSE4-NEXT: xorl $3, %eax
@@ -2580,11 +2580,11 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; SSE4-NEXT: .LBB7_4: # %else2
; SSE4-NEXT: retq
; SSE4-NEXT: .LBB7_1: # %cond.store
-; SSE4-NEXT: pextrw $0, %xmm2, (%rdi)
+; SSE4-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE4-NEXT: testb $2, %al
; SSE4-NEXT: je .LBB7_4
; SSE4-NEXT: .LBB7_3: # %cond.store1
-; SSE4-NEXT: pextrw $1, %xmm2, 2(%rdi)
+; SSE4-NEXT: pextrw $1, %xmm0, 2(%rdi)
; SSE4-NEXT: retq
;
; AVX1-LABEL: truncstore_v2i64_v2i16:
@@ -2598,8 +2598,8 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; AVX1-NEXT: # xmm3 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovmskpd %xmm1, %eax
; AVX1-NEXT: xorl $3, %eax
@@ -2627,8 +2627,8 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [18446744073709518848,18446744073709518848]
; AVX2-NEXT: vpcmpgtq %xmm3, %xmm0, %xmm4
; AVX2-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovmskpd %xmm1, %eax
; AVX2-NEXT: xorl $3, %eax
diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
index 3f87bca79a00ed..d4ecb53b839970 100644
--- a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
+++ b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
@@ -2205,17 +2205,17 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; SSE4-LABEL: truncstore_v2i64_v2i16:
; SSE4: # %bb.0:
; SSE4-NEXT: movdqa %xmm0, %xmm2
-; SSE4-NEXT: pxor %xmm4, %xmm4
-; SSE4-NEXT: movapd {{.*#+}} xmm3 = [65535,65535]
+; SSE4-NEXT: pxor %xmm3, %xmm3
+; SSE4-NEXT: movapd {{.*#+}} xmm4 = [65535,65535]
; SSE4-NEXT: movdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
; SSE4-NEXT: pxor %xmm0, %xmm5
; SSE4-NEXT: movdqa {{.*#+}} xmm0 = [9223372036854841343,9223372036854841343]
; SSE4-NEXT: pcmpgtq %xmm5, %xmm0
-; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm3
-; SSE4-NEXT: packusdw %xmm3, %xmm3
-; SSE4-NEXT: packusdw %xmm3, %xmm3
-; SSE4-NEXT: pcmpeqq %xmm1, %xmm4
-; SSE4-NEXT: movmskpd %xmm4, %eax
+; SSE4-NEXT: blendvpd %xmm0, %xmm2, %xmm4
+; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm4[0,2,2,3]
+; SSE4-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE4-NEXT: pcmpeqq %xmm1, %xmm3
+; SSE4-NEXT: movmskpd %xmm3, %eax
; SSE4-NEXT: xorl $3, %eax
; SSE4-NEXT: testb $1, %al
; SSE4-NEXT: jne .LBB7_1
@@ -2225,11 +2225,11 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; SSE4-NEXT: .LBB7_4: # %else2
; SSE4-NEXT: retq
; SSE4-NEXT: .LBB7_1: # %cond.store
-; SSE4-NEXT: pextrw $0, %xmm3, (%rdi)
+; SSE4-NEXT: pextrw $0, %xmm0, (%rdi)
; SSE4-NEXT: testb $2, %al
; SSE4-NEXT: je .LBB7_4
; SSE4-NEXT: .LBB7_3: # %cond.store1
-; SSE4-NEXT: pextrw $1, %xmm3, 2(%rdi)
+; SSE4-NEXT: pextrw $1, %xmm0, 2(%rdi)
; SSE4-NEXT: retq
;
; AVX1-LABEL: truncstore_v2i64_v2i16:
@@ -2242,8 +2242,8 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; AVX1-NEXT: # xmm5 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovmskpd %xmm1, %eax
; AVX1-NEXT: xorl $3, %eax
@@ -2271,8 +2271,8 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %p, <2 x i64> %mask) {
; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
; AVX2-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX2-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX2-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovmskpd %xmm1, %eax
; AVX2-NEXT: xorl $3, %eax
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index 8032c8df2bda84..d6b200a1e268a6 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -910,7 +910,7 @@ define <2 x i16> @trunc_packus_v2i64_v2i16(<2 x i64> %a0) {
; AVX2-FAST-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm1
; AVX2-FAST-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_packus_v2i64_v2i16:
diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
index 6fa548ae93cff0..1f8572f6c1b897 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll
@@ -825,21 +825,21 @@ define <2 x i16> @trunc_ssat_v2i64_v2i16(<2 x i64> %a0) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm3
; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
; SSE2-SSSE3-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE2-SSSE3-NEXT: por %xmm3, %xmm0
-; SSE2-SSSE3-NEXT: pxor %xmm0, %xmm1
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm3
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm3
+; SSE2-SSSE3-NEXT: pxor %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm2, %xmm2
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,2,2]
-; SSE2-SSSE3-NEXT: pand %xmm3, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,0,2,2]
+; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-SSSE3-NEXT: por %xmm2, %xmm1
-; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pand %xmm1, %xmm3
; SSE2-SSSE3-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-SSSE3-NEXT: por %xmm1, %xmm0
-; SSE2-SSSE3-NEXT: packssdw %xmm0, %xmm0
-; SSE2-SSSE3-NEXT: packssdw %xmm0, %xmm0
+; SSE2-SSSE3-NEXT: por %xmm3, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc_ssat_v2i64_v2i16:
@@ -866,9 +866,8 @@ define <2 x i16> @trunc_ssat_v2i64_v2i16(<2 x i64> %a0) {
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: packssdw %xmm1, %xmm1
-; SSE41-NEXT: packssdw %xmm1, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_ssat_v2i64_v2i16:
@@ -881,21 +880,32 @@ define <2 x i16> @trunc_ssat_v2i64_v2i16(<2 x i64> %a0) {
; AVX1-NEXT: # xmm1 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: trunc_ssat_v2i64_v2i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: retq
+; AVX2-SLOW-LABEL: trunc_ssat_v2i64_v2i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_ssat_v2i64_v2i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_ssat_v2i64_v2i16:
; AVX512F: # %bb.0:
@@ -963,9 +973,9 @@ define void @trunc_ssat_v2i64_v2i16_store(<2 x i64> %a0, ptr%p1) {
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm3
; SSE2-SSSE3-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-SSSE3-NEXT: por %xmm3, %xmm1
-; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm1
-; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm1
-; SSE2-SSSE3-NEXT: movd %xmm1, (%rdi)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE2-SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-SSSE3-NEXT: movd %xmm0, (%rdi)
; SSE2-SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc_ssat_v2i64_v2i16_store:
@@ -992,9 +1002,9 @@ define void @trunc_ssat_v2i64_v2i16_store(<2 x i64> %a0, ptr%p1) {
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: por %xmm3, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: packssdw %xmm1, %xmm1
-; SSE41-NEXT: packssdw %xmm1, %xmm1
-; SSE41-NEXT: movd %xmm1, (%rdi)
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: movd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_ssat_v2i64_v2i16_store:
@@ -1007,23 +1017,35 @@ define void @trunc_ssat_v2i64_v2i16_store(<2 x i64> %a0, ptr%p1) {
; AVX1-NEXT: # xmm1 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
-; AVX2-LABEL: trunc_ssat_v2i64_v2i16_store:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
-; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
-; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, (%rdi)
-; AVX2-NEXT: retq
+; AVX2-SLOW-LABEL: trunc_ssat_v2i64_v2i16_store:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vmovd %xmm0, (%rdi)
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_ssat_v2i64_v2i16_store:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm1 = [32767,32767]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm1 = [18446744073709518848,18446744073709518848]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT: vmovd %xmm0, (%rdi)
+; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_ssat_v2i64_v2i16_store:
; AVX512F: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
index fded69f955ccd2..12803e2df57192 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
@@ -626,9 +626,8 @@ define <2 x i16> @trunc_usat_v2i64_v2i16(<2 x i64> %a0) {
; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v2i64_v2i16:
@@ -640,21 +639,32 @@ define <2 x i16> @trunc_usat_v2i64_v2i16(<2 x i64> %a0) {
; AVX1-NEXT: # xmm3 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: trunc_usat_v2i64_v2i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
-; AVX2-NEXT: # xmm1 = mem[0,0]
-; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: retq
+; AVX2-SLOW-LABEL: trunc_usat_v2i64_v2i16:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
+; AVX2-SLOW-NEXT: # xmm1 = mem[0,0]
+; AVX2-SLOW-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_usat_v2i64_v2i16:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
+; AVX2-FAST-NEXT: # xmm1 = mem[0,0]
+; AVX2-FAST-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,8,9,10,11,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_usat_v2i64_v2i16:
; AVX512F: # %bb.0:
@@ -722,9 +732,9 @@ define void @trunc_usat_v2i64_v2i16_store(<2 x i64> %a0, ptr %p1) {
; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm2
-; SSE41-NEXT: packusdw %xmm2, %xmm2
-; SSE41-NEXT: movd %xmm2, (%rdi)
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE41-NEXT: movd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v2i64_v2i16_store:
@@ -736,23 +746,35 @@ define void @trunc_usat_v2i64_v2i16_store(<2 x i64> %a0, ptr %p1) {
; AVX1-NEXT: # xmm3 = mem[0,0]
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
-; AVX2-LABEL: trunc_usat_v2i64_v2i16_store:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
-; AVX2-NEXT: # xmm1 = mem[0,0]
-; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
-; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
-; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, (%rdi)
-; AVX2-NEXT: retq
+; AVX2-SLOW-LABEL: trunc_usat_v2i64_v2i16_store:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
+; AVX2-SLOW-NEXT: # xmm1 = mem[0,0]
+; AVX2-SLOW-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
+; AVX2-SLOW-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-SLOW-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vmovd %xmm0, (%rdi)
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: trunc_usat_v2i64_v2i16_store:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vmovddup {{.*#+}} xmm1 = [65535,65535]
+; AVX2-FAST-NEXT: # xmm1 = mem[0,0]
+; AVX2-FAST-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm2
+; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [9223372036854841343,9223372036854841343]
+; AVX2-FAST-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT: vmovd %xmm0, (%rdi)
+; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc_usat_v2i64_v2i16_store:
; AVX512F: # %bb.0: