[llvm] [DAG] combineVSelectWithAllOnesOrZeros - missing freeze (PR #150388)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 24 01:33:00 PDT 2025
https://github.com/woruyu created https://github.com/llvm/llvm-project/pull/150388
None
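(Editorial note on the change, since the PR body is empty: the patch adds DAG.getFreeze() around the non-constant operand in the three transforms performed by combineVSelectWithAllOnesOrZeros. The transforms are sound for well-defined values but not for poison: a vselect never reads the unselected operand, while OR/AND propagate poison from both operands. A minimal hand-written IR sketch of the `select Cond, -1, x -> or Cond, x` case — illustrative only, not taken from the patch's tests:

  ; Original: lanes where %c is true are -1; %x is never read there,
  ; so a poison lane of %x cannot reach the result in those lanes.
  %sel = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %x

  ; Combined form: %m is the lane mask, all-ones where %c is true.
  %m = sext <4 x i1> %c to <4 x i32>
  ; Unsound without freeze: 'or -1, poison' is poison, so a poison lane
  ; of %x now poisons a lane the select had pinned to -1.
  %or.bad = or <4 x i32> %m, %x
  ; With freeze, the poison lane becomes some fixed value, and
  ; 'or -1, (freeze poison)' folds back to -1 as the select required.
  %x.fr = freeze <4 x i32> %x
  %or.ok = or <4 x i32> %m, %x.fr

The `select Cond, x, 0 -> and Cond, x` case is symmetric, with the false lanes pinned to 0.)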
From 7d37776d73ae6be093faed68932b56c97288c691 Mon Sep 17 00:00:00 2001
From: woruyu <1214539920 at qq.com>
Date: Thu, 24 Jul 2025 16:32:26 +0800
Subject: [PATCH] fix: add freeze for combineVSelectWithAllOnesOrZeros
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 8 +-
llvm/test/CodeGen/AArch64/vselect-ext.ll | 48 +-
llvm/test/CodeGen/X86/avg-mask.ll | 16 +-
llvm/test/CodeGen/X86/avx512-ext.ll | 36 +-
llvm/test/CodeGen/X86/pr78897.ll | 2 +-
llvm/test/CodeGen/X86/sqrt-fastmath.ll | 36 +-
llvm/test/CodeGen/X86/ushl_sat_vec.ll | 2 +-
llvm/test/CodeGen/X86/var-permute-128.ll | 986 +++++++++--
llvm/test/CodeGen/X86/var-permute-256.ll | 1562 +++++++++++++++--
llvm/test/CodeGen/X86/vector-bo-select.ll | 536 +++---
10 files changed, 2575 insertions(+), 657 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d3df43473013e..7e141318a1e75 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -13165,14 +13165,14 @@ static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
// select Cond, -1, x → or Cond, x
if (IsTAllOne) {
SDValue X = DAG.getBitcast(CondVT, FVal);
- SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, X);
+ SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, DAG.getFreeze(X));
return DAG.getBitcast(VT, Or);
}
// select Cond, x, 0 → and Cond, x
if (IsFAllZero) {
SDValue X = DAG.getBitcast(CondVT, TVal);
- SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, X);
+ SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, DAG.getFreeze(X));
return DAG.getBitcast(VT, And);
}
@@ -13180,8 +13180,8 @@ static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
if (IsTAllZero &&
(isBitwiseNot(peekThroughBitcasts(Cond)) || TLI.hasAndNot(Cond))) {
SDValue X = DAG.getBitcast(CondVT, FVal);
- SDValue And =
- DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT), X);
+ SDValue And = DAG.getNode(ISD::AND, DL, CondVT,
+ DAG.getNOT(DL, Cond, CondVT), DAG.getFreeze(X));
return DAG.getBitcast(VT, And);
}
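(The same reasoning applies to this third transform, `select Cond, 0, x -> and (not Cond), x`: the select pins the true lanes to 0, but 'and 0, poison' is poison, so FVal must be frozen here as well. A matching hand-written IR sketch, illustrative only:

  %m = sext <4 x i1> %c to <4 x i32>
  %m.not = xor <4 x i32> %m, <i32 -1, i32 -1, i32 -1, i32 -1>
  ; Freezing %x keeps poison from leaking into lanes the select pinned to 0.
  %x.fr = freeze <4 x i32> %x
  %and = and <4 x i32> %m.not, %x.fr

Most of the test churn below appears to be a knock-on effect of the new FREEZE nodes blocking later combines, e.g. several var_shuffle_zero cases now go through a stack lowering instead of vpermilps/pshufb.)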
diff --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index 4f2b9c5a62669..9fe8c50640981 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -175,12 +175,12 @@ define <8 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i1
; CHECK: ; %bb.0:
; CHECK-NEXT: bic.8h v0, #128, lsl #8
; CHECK-NEXT: movi.4s v1, #10
-; CHECK-NEXT: ushll2.4s v2, v0, #0
-; CHECK-NEXT: ushll.4s v0, v0, #0
-; CHECK-NEXT: cmhi.4s v3, v0, v1
-; CHECK-NEXT: cmhi.4s v1, v2, v1
-; CHECK-NEXT: and.16b v1, v1, v2
-; CHECK-NEXT: and.16b v0, v3, v0
+; CHECK-NEXT: ushll.4s v2, v0, #0
+; CHECK-NEXT: ushll2.4s v0, v0, #0
+; CHECK-NEXT: cmhi.4s v3, v2, v1
+; CHECK-NEXT: cmhi.4s v1, v0, v1
+; CHECK-NEXT: and.16b v1, v1, v0
+; CHECK-NEXT: and.16b v0, v3, v2
; CHECK-NEXT: ret
%ext = zext <8 x i15> %a to <8 x i32>
%cmp = icmp ugt <8 x i15> %a, <i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10>
@@ -289,12 +289,12 @@ define <8 x i32> @same_zext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13>
; CHECK: ; %bb.0:
; CHECK-NEXT: bic.8h v0, #224, lsl #8
; CHECK-NEXT: movi.4s v1, #10
-; CHECK-NEXT: ushll2.4s v2, v0, #0
-; CHECK-NEXT: ushll.4s v0, v0, #0
-; CHECK-NEXT: cmeq.4s v3, v0, v1
-; CHECK-NEXT: cmeq.4s v1, v2, v1
-; CHECK-NEXT: and.16b v1, v1, v2
-; CHECK-NEXT: and.16b v0, v3, v0
+; CHECK-NEXT: ushll.4s v2, v0, #0
+; CHECK-NEXT: ushll2.4s v0, v0, #0
+; CHECK-NEXT: cmeq.4s v3, v2, v1
+; CHECK-NEXT: cmeq.4s v1, v0, v1
+; CHECK-NEXT: and.16b v1, v1, v0
+; CHECK-NEXT: and.16b v0, v3, v2
; CHECK-NEXT: ret
%ext = zext <8 x i13> %a to <8 x i32>
%cmp = icmp eq <8 x i13> %a, <i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10>
@@ -429,17 +429,17 @@ define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32(<8 x i16> %a) {
define <8 x i32> @same_sext_used_in_cmp_eq_and_select_v8i32_from_v8i13(<8 x i13> %a) {
; CHECK-LABEL: same_sext_used_in_cmp_eq_and_select_v8i32_from_v8i13:
; CHECK: ; %bb.0:
-; CHECK-NEXT: ushll.4s v2, v0, #0
-; CHECK-NEXT: ushll2.4s v0, v0, #0
+; CHECK-NEXT: ushll2.4s v2, v0, #0
+; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: movi.4s v1, #10
; CHECK-NEXT: shl.4s v0, v0, #19
; CHECK-NEXT: shl.4s v2, v2, #19
; CHECK-NEXT: sshr.4s v0, v0, #19
; CHECK-NEXT: sshr.4s v2, v2, #19
-; CHECK-NEXT: cmeq.4s v3, v2, v1
-; CHECK-NEXT: cmeq.4s v1, v0, v1
-; CHECK-NEXT: and.16b v1, v1, v0
-; CHECK-NEXT: and.16b v0, v3, v2
+; CHECK-NEXT: cmeq.4s v3, v0, v1
+; CHECK-NEXT: cmeq.4s v1, v2, v1
+; CHECK-NEXT: and.16b v1, v1, v2
+; CHECK-NEXT: and.16b v0, v3, v0
; CHECK-NEXT: ret
%ext = sext <8 x i13> %a to <8 x i32>
%cmp = icmp eq <8 x i13> %a, <i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10, i13 10>
@@ -493,17 +493,17 @@ entry:
define <8 x i32> @same_sext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i15(<8 x i15> %a) {
; CHECK-LABEL: same_sext_used_in_cmp_unsigned_pred_and_select_v8i32_from_v8i15:
; CHECK: ; %bb.0:
-; CHECK-NEXT: ushll.4s v2, v0, #0
-; CHECK-NEXT: ushll2.4s v0, v0, #0
+; CHECK-NEXT: ushll2.4s v2, v0, #0
+; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: movi.4s v1, #10
; CHECK-NEXT: shl.4s v0, v0, #17
; CHECK-NEXT: shl.4s v2, v2, #17
; CHECK-NEXT: sshr.4s v0, v0, #17
; CHECK-NEXT: sshr.4s v2, v2, #17
-; CHECK-NEXT: cmge.4s v3, v2, v1
-; CHECK-NEXT: cmge.4s v1, v0, v1
-; CHECK-NEXT: and.16b v1, v1, v0
-; CHECK-NEXT: and.16b v0, v3, v2
+; CHECK-NEXT: cmge.4s v3, v0, v1
+; CHECK-NEXT: cmge.4s v1, v2, v1
+; CHECK-NEXT: and.16b v1, v1, v2
+; CHECK-NEXT: and.16b v0, v3, v0
; CHECK-NEXT: ret
%ext = sext <8 x i15> %a to <8 x i32>
%cmp = icmp sge <8 x i15> %a, <i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10, i15 10>
diff --git a/llvm/test/CodeGen/X86/avg-mask.ll b/llvm/test/CodeGen/X86/avg-mask.ll
index b148cd3d42df6..e8866393e8b62 100644
--- a/llvm/test/CodeGen/X86/avg-mask.ll
+++ b/llvm/test/CodeGen/X86/avg-mask.ll
@@ -177,11 +177,11 @@ define <64 x i8> @avg_v64i8_maskz(<64 x i8> %a, <64 x i8> %b, i64 %mask) nounwin
; AVX512F-NEXT: shrq $32, %rdi
; AVX512F-NEXT: shrq $48, %rax
; AVX512F-NEXT: shrl $16, %ecx
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512F-NEXT: vpavgb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512F-NEXT: vpavgb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512F-NEXT: kmovw %ecx, %k2
; AVX512F-NEXT: kmovw %eax, %k3
; AVX512F-NEXT: kmovw %edi, %k4
@@ -364,11 +364,11 @@ define <32 x i16> @avg_v32i16_maskz(<32 x i16> %a, <32 x i16> %b, i32 %mask) nou
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: shrl $16, %edi
-; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; AVX512F-NEXT: vpavgw %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm1
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512F-NEXT: vpavgw %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
; AVX512F-NEXT: kmovw %edi, %k2
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/avx512-ext.ll b/llvm/test/CodeGen/X86/avx512-ext.ll
index c60d9a3ff17d3..1a712ffac5b7e 100644
--- a/llvm/test/CodeGen/X86/avx512-ext.ll
+++ b/llvm/test/CodeGen/X86/avx512-ext.ll
@@ -6,7 +6,8 @@
define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: zext_8x8mem_to_8x16:
; KNL: # %bb.0:
-; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -21,7 +22,8 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
;
; AVX512DQNOBW-LABEL: zext_8x8mem_to_8x16:
; AVX512DQNOBW: # %bb.0:
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -35,7 +37,8 @@ define <8 x i16> @zext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone {
; KNL-LABEL: sext_8x8mem_to_8x16:
; KNL: # %bb.0:
-; KNL-NEXT: vpmovsxbw (%rdi), %xmm1
+; KNL-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; KNL-NEXT: vpmovsxbw %xmm1, %xmm1
; KNL-NEXT: vpsllw $15, %xmm0, %xmm0
; KNL-NEXT: vpsraw $15, %xmm0, %xmm0
; KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -50,7 +53,8 @@ define <8 x i16> @sext_8x8mem_to_8x16(ptr%i , <8 x i1> %mask) nounwind readnone
;
; AVX512DQNOBW-LABEL: sext_8x8mem_to_8x16:
; AVX512DQNOBW: # %bb.0:
-; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %xmm1
+; AVX512DQNOBW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512DQNOBW-NEXT: vpmovsxbw %xmm1, %xmm1
; AVX512DQNOBW-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX512DQNOBW-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -208,8 +212,10 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; KNL-NEXT: vmovdqu (%rdi), %ymm2
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2
+; KNL-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
@@ -231,8 +237,10 @@ define <32 x i16> @zext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0
@@ -253,8 +261,10 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm1
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; KNL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; KNL-NEXT: vpmovsxbw 16(%rdi), %ymm2
-; KNL-NEXT: vpmovsxbw (%rdi), %ymm3
+; KNL-NEXT: vmovdqu (%rdi), %ymm2
+; KNL-NEXT: vpmovsxbw %xmm2, %ymm3
+; KNL-NEXT: vextracti128 $1, %ymm2, %xmm2
+; KNL-NEXT: vpmovsxbw %xmm2, %ymm2
; KNL-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; KNL-NEXT: vpsllw $15, %ymm0, %ymm0
; KNL-NEXT: vpsraw $15, %ymm0, %ymm0
@@ -276,8 +286,10 @@ define <32 x i16> @sext_32x8mem_to_32x16(ptr%i , <32 x i1> %mask) nounwind readn
; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512DQNOBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512DQNOBW-NEXT: vpmovsxbw 16(%rdi), %ymm2
-; AVX512DQNOBW-NEXT: vpmovsxbw (%rdi), %ymm3
+; AVX512DQNOBW-NEXT: vmovdqu (%rdi), %ymm2
+; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm3
+; AVX512DQNOBW-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512DQNOBW-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512DQNOBW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQNOBW-NEXT: vpsllw $15, %ymm0, %ymm0
; AVX512DQNOBW-NEXT: vpsraw $15, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 4613c2bcdcaf4..db77baa7ff8a3 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -22,7 +22,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
-; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
+; X86-SSE2-NEXT: movq {{.*#+}} xmm1 = [17,17,17,17,17,17,17,17,0,0,0,0,0,0,0,0]
; X86-SSE2-NEXT: pand %xmm0, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; X86-SSE2-NEXT: movd %xmm2, %esi
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index 5cd604c62a166..a260b325f7e3c 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -410,34 +410,34 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
define <4 x float> @v4f32_estimate2(<4 x float> %x) #5 {
; SSE-LABEL: v4f32_estimate2:
; SSE: # %bb.0:
-; SSE-NEXT: rsqrtps %xmm0, %xmm2
-; SSE-NEXT: mulps %xmm0, %xmm2
-; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; SSE-NEXT: cmpleps %xmm0, %xmm1
-; SSE-NEXT: andps %xmm2, %xmm1
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; SSE-NEXT: andps %xmm0, %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
+; SSE-NEXT: cmpleps %xmm1, %xmm2
+; SSE-NEXT: rsqrtps %xmm0, %xmm1
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: v4f32_estimate2:
; AVX1: # %bb.0:
-; AVX1-NEXT: vrsqrtps %xmm0, %xmm1
-; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; AVX1-NEXT: vcmpleps %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vcmpleps %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vrsqrtps %xmm0, %xmm2
+; AVX1-NEXT: vmulps %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: v4f32_estimate2:
; AVX512: # %bb.0:
-; AVX512-NEXT: vrsqrtps %xmm0, %xmm1
-; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
-; AVX512-NEXT: vandps %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
-; AVX512-NEXT: vcmpleps %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vcmpleps %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vrsqrtps %xmm0, %xmm2
+; AVX512-NEXT: vmulps %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vandps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%sqrt = tail call fast <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
ret <4 x float> %sqrt
diff --git a/llvm/test/CodeGen/X86/ushl_sat_vec.ll b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
index ebb5e135eacd0..b8e83da9cf361 100644
--- a/llvm/test/CodeGen/X86/ushl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
@@ -281,7 +281,7 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-AVX2-NEXT: vpsllvd %ymm1, %ymm2, %ymm2
-; X64-AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
+; X64-AVX2-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; X64-AVX2-NEXT: vpsrlvd %ymm1, %ymm3, %ymm1
diff --git a/llvm/test/CodeGen/X86/var-permute-128.ll b/llvm/test/CodeGen/X86/var-permute-128.ll
index 7f4111e65cc17..e0335a8d2e06c 100644
--- a/llvm/test/CodeGen/X86/var-permute-128.ll
+++ b/llvm/test/CodeGen/X86/var-permute-128.ll
@@ -126,13 +126,15 @@ define <2 x i64> @var_shuffle_zero_v2i64(<2 x i64> %v, <2 x i64> %indices) nounw
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE41-NEXT: por %xmm3, %xmm2
; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pxor %xmm3, %xmm3
-; SSE41-NEXT: pcmpeqq %xmm1, %xmm3
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm4
-; SSE41-NEXT: pandn %xmm4, %xmm2
+; SSE41-NEXT: movq %xmm1, %rax
+; SSE41-NEXT: andl $1, %eax
+; SSE41-NEXT: pextrq $1, %xmm1, %rcx
+; SSE41-NEXT: andl $1, %ecx
+; SSE41-NEXT: movaps %xmm0, -24(%rsp)
+; SSE41-NEXT: movq -24(%rsp,%rcx,8), %xmm0 # xmm0 = mem[0],zero
+; SSE41-NEXT: movq -24(%rsp,%rax,8), %xmm1 # xmm1 = mem[0],zero
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE41-NEXT: pandn %xmm1, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
@@ -140,8 +142,14 @@ define <2 x i64> @var_shuffle_zero_v2i64(<2 x i64> %v, <2 x i64> %indices) nounw
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vmovq %xmm1, %rax
+; XOP-NEXT: andl $1, %eax
+; XOP-NEXT: vpextrq $1, %xmm1, %rcx
+; XOP-NEXT: andl $1, %ecx
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: vmovq -24(%rsp,%rcx,8), %xmm0 # xmm0 = mem[0],zero
+; XOP-NEXT: vmovq -24(%rsp,%rax,8), %xmm1 # xmm1 = mem[0],zero
+; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
@@ -150,8 +158,14 @@ define <2 x i64> @var_shuffle_zero_v2i64(<2 x i64> %v, <2 x i64> %indices) nounw
; AVX1-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: vmovq -24(%rsp,%rcx,8), %xmm0 # xmm0 = mem[0],zero
+; AVX1-NEXT: vmovq -24(%rsp,%rax,8), %xmm1 # xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
@@ -160,8 +174,14 @@ define <2 x i64> @var_shuffle_zero_v2i64(<2 x i64> %v, <2 x i64> %indices) nounw
; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: vmovq -24(%rsp,%rcx,8), %xmm0 # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq -24(%rsp,%rax,8), %xmm1 # xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
@@ -297,39 +317,73 @@ define <4 x i32> @var_shuffle_zero_v4i32(<4 x i32> %v, <4 x i32> %indices) nounw
;
; SSSE3-LABEL: var_shuffle_zero_v4i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: pxor %xmm1, %xmm2
-; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [67372036,67372036,67372036,67372036]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm3, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: pmuludq %xmm3, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
+; SSSE3-NEXT: movaps %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSSE3-NEXT: movd %xmm3, %edx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSSE3-NEXT: movd %xmm1, %esi
+; SSSE3-NEXT: movaps %xmm2, -24(%rsp)
+; SSSE3-NEXT: andl $3, %eax
+; SSSE3-NEXT: andl $3, %ecx
+; SSSE3-NEXT: andl $3, %edx
+; SSSE3-NEXT: andl $3, %esi
+; SSSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT: pandn %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_zero_v4i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxbd {{.*#+}} xmm2 = [4,4,4,4]
-; SSE41-NEXT: pmaxud %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [4,4,4,4]
+; SSE41-NEXT: pmaxud %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
+; SSE41-NEXT: pextrd $1, %xmm1, %ecx
+; SSE41-NEXT: pextrd $2, %xmm1, %edx
+; SSE41-NEXT: pextrd $3, %xmm1, %esi
+; SSE41-NEXT: movaps %xmm2, -24(%rsp)
+; SSE41-NEXT: andl $3, %eax
+; SSE41-NEXT: andl $3, %ecx
+; SSE41-NEXT: andl $3, %edx
+; SSE41-NEXT: andl $3, %esi
+; SSE41-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; SSE41-NEXT: pinsrd $1, -24(%rsp,%rcx,4), %xmm1
+; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm1
+; SSE41-NEXT: pinsrd $3, -24(%rsp,%rsi,4), %xmm1
+; SSE41-NEXT: pandn %xmm1, %xmm0
; SSE41-NEXT: retq
;
; XOP-LABEL: var_shuffle_zero_v4i32:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vmovd %xmm1, %eax
+; XOP-NEXT: vpextrd $1, %xmm1, %ecx
+; XOP-NEXT: vpextrd $2, %xmm1, %edx
+; XOP-NEXT: vpextrd $3, %xmm1, %esi
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: andl $3, %eax
+; XOP-NEXT: andl $3, %ecx
+; XOP-NEXT: andl $3, %edx
+; XOP-NEXT: andl $3, %esi
+; XOP-NEXT: vmovd -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; XOP-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; XOP-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
+; XOP-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
@@ -338,7 +392,19 @@ define <4 x i32> @var_shuffle_zero_v4i32(<4 x i32> %v, <4 x i32> %indices) nounw
; AVX1-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $2, %xmm1, %edx
+; AVX1-NEXT: vpextrd $3, %xmm1, %esi
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: andl $3, %ecx
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: vmovd -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
@@ -348,7 +414,19 @@ define <4 x i32> @var_shuffle_zero_v4i32(<4 x i32> %v, <4 x i32> %indices) nounw
; AVX2-NEXT: vpmaxud %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm1, %edx
+; AVX2-NEXT: vpextrd $3, %xmm1, %esi
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: vmovd -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
@@ -501,39 +579,39 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw
; SSE3-NEXT: pextrw $0, %xmm1, %eax
; SSE3-NEXT: pextrw $1, %xmm1, %ecx
; SSE3-NEXT: pextrw $2, %xmm1, %edx
-; SSE3-NEXT: pextrw $3, %xmm1, %esi
-; SSE3-NEXT: pextrw $4, %xmm1, %edi
-; SSE3-NEXT: pextrw $5, %xmm1, %r8d
-; SSE3-NEXT: pextrw $6, %xmm1, %r9d
-; SSE3-NEXT: pextrw $7, %xmm1, %r10d
+; SSE3-NEXT: pextrw $3, %xmm1, %edi
+; SSE3-NEXT: pextrw $4, %xmm1, %r8d
+; SSE3-NEXT: pextrw $5, %xmm1, %r9d
+; SSE3-NEXT: pextrw $6, %xmm1, %r10d
+; SSE3-NEXT: pextrw $7, %xmm1, %esi
; SSE3-NEXT: movdqa %xmm2, -24(%rsp)
; SSE3-NEXT: andl $7, %eax
+; SSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE3-NEXT: andl $7, %ecx
+; SSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE3-NEXT: andl $7, %edx
-; SSE3-NEXT: andl $7, %esi
+; SSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx
; SSE3-NEXT: andl $7, %edi
+; SSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSE3-NEXT: andl $7, %r8d
+; SSE3-NEXT: movzwl -24(%rsp,%r8,2), %r8d
; SSE3-NEXT: andl $7, %r9d
+; SSE3-NEXT: movzwl -24(%rsp,%r9,2), %r9d
; SSE3-NEXT: andl $7, %r10d
; SSE3-NEXT: movzwl -24(%rsp,%r10,2), %r10d
-; SSE3-NEXT: movd %r10d, %xmm1
-; SSE3-NEXT: movzwl -24(%rsp,%r9,2), %r9d
-; SSE3-NEXT: movd %r9d, %xmm2
+; SSE3-NEXT: andl $7, %esi
+; SSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; SSE3-NEXT: movd %esi, %xmm1
+; SSE3-NEXT: movd %r10d, %xmm2
; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE3-NEXT: movzwl -24(%rsp,%r8,2), %r8d
-; SSE3-NEXT: movd %r8d, %xmm1
-; SSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
-; SSE3-NEXT: movd %edi, %xmm3
+; SSE3-NEXT: movd %r9d, %xmm1
+; SSE3-NEXT: movd %r8d, %xmm3
; SSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; SSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
-; SSE3-NEXT: movd %esi, %xmm1
-; SSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; SSE3-NEXT: movd %edi, %xmm1
; SSE3-NEXT: movd %edx, %xmm2
; SSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE3-NEXT: movd %ecx, %xmm1
-; SSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE3-NEXT: movd %eax, %xmm4
; SSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
@@ -543,37 +621,136 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw
;
; SSSE3-LABEL: var_shuffle_zero_v8i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8]
-; SSSE3-NEXT: psubusw %xmm1, %xmm2
-; SSSE3-NEXT: pxor %xmm3, %xmm3
-; SSSE3-NEXT: pcmpeqw %xmm2, %xmm3
-; SSSE3-NEXT: por %xmm3, %xmm1
-; SSSE3-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [514,514,514,514,514,514,514,514]
-; SSSE3-NEXT: paddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: por %xmm3, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8]
+; SSSE3-NEXT: psubusw %xmm1, %xmm3
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: pcmpeqw %xmm3, %xmm0
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: pextrw $0, %xmm1, %eax
+; SSSE3-NEXT: pextrw $1, %xmm1, %ecx
+; SSSE3-NEXT: pextrw $2, %xmm1, %edx
+; SSSE3-NEXT: pextrw $3, %xmm1, %edi
+; SSSE3-NEXT: pextrw $4, %xmm1, %r8d
+; SSSE3-NEXT: pextrw $5, %xmm1, %r9d
+; SSSE3-NEXT: pextrw $6, %xmm1, %r10d
+; SSSE3-NEXT: pextrw $7, %xmm1, %esi
+; SSSE3-NEXT: movdqa %xmm2, -24(%rsp)
+; SSSE3-NEXT: andl $7, %eax
+; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT: andl $7, %ecx
+; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; SSSE3-NEXT: andl $7, %edx
+; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; SSSE3-NEXT: andl $7, %edi
+; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
+; SSSE3-NEXT: andl $7, %r8d
+; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %r8d
+; SSSE3-NEXT: andl $7, %r9d
+; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %r9d
+; SSSE3-NEXT: andl $7, %r10d
+; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %r10d
+; SSSE3-NEXT: andl $7, %esi
+; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; SSSE3-NEXT: movd %esi, %xmm1
+; SSSE3-NEXT: movd %r10d, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: movd %r9d, %xmm1
+; SSSE3-NEXT: movd %r8d, %xmm3
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSSE3-NEXT: movd %edi, %xmm1
+; SSSE3-NEXT: movd %edx, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: movd %eax, %xmm4
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
+; SSSE3-NEXT: pandn %xmm4, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_zero_v8i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxbw {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8]
-; SSE41-NEXT: pmaxuw %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqw %xmm1, %xmm2
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [514,514,514,514,514,514,514,514]
-; SSE41-NEXT: paddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: pmovsxbw {{.*#+}} xmm0 = [8,8,8,8,8,8,8,8]
+; SSE41-NEXT: pmaxuw %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: pextrw $0, %xmm1, %ecx
+; SSE41-NEXT: pextrw $1, %xmm1, %edx
+; SSE41-NEXT: pextrw $2, %xmm1, %esi
+; SSE41-NEXT: pextrw $3, %xmm1, %edi
+; SSE41-NEXT: pextrw $4, %xmm1, %r8d
+; SSE41-NEXT: pextrw $5, %xmm1, %r9d
+; SSE41-NEXT: pextrw $6, %xmm1, %r10d
+; SSE41-NEXT: pextrw $7, %xmm1, %eax
+; SSE41-NEXT: movaps %xmm2, -24(%rsp)
+; SSE41-NEXT: andl $7, %ecx
+; SSE41-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; SSE41-NEXT: andl $7, %edx
+; SSE41-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; SSE41-NEXT: andl $7, %esi
+; SSE41-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; SSE41-NEXT: andl $7, %edi
+; SSE41-NEXT: movzwl -24(%rsp,%rdi,2), %edi
+; SSE41-NEXT: andl $7, %r8d
+; SSE41-NEXT: movzwl -24(%rsp,%r8,2), %r8d
+; SSE41-NEXT: andl $7, %r9d
+; SSE41-NEXT: movzwl -24(%rsp,%r9,2), %r9d
+; SSE41-NEXT: andl $7, %r10d
+; SSE41-NEXT: movzwl -24(%rsp,%r10,2), %r10d
+; SSE41-NEXT: andl $7, %eax
+; SSE41-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; SSE41-NEXT: movd %ecx, %xmm1
+; SSE41-NEXT: pinsrw $1, %edx, %xmm1
+; SSE41-NEXT: pinsrw $2, %esi, %xmm1
+; SSE41-NEXT: pinsrw $3, %edi, %xmm1
+; SSE41-NEXT: pinsrw $4, %r8d, %xmm1
+; SSE41-NEXT: pinsrw $5, %r9d, %xmm1
+; SSE41-NEXT: pinsrw $6, %r10d, %xmm1
+; SSE41-NEXT: pinsrw $7, %eax, %xmm1
+; SSE41-NEXT: pandn %xmm1, %xmm0
; SSE41-NEXT: retq
;
; XOP-LABEL: var_shuffle_zero_v8i16:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [514,514,514,514,514,514,514,514]
-; XOP-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; XOP-NEXT: vpor %xmm2, %xmm1, %xmm1
-; XOP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $0, %xmm1, %ecx
+; XOP-NEXT: vpextrw $1, %xmm1, %edx
+; XOP-NEXT: vpextrw $2, %xmm1, %esi
+; XOP-NEXT: vpextrw $3, %xmm1, %edi
+; XOP-NEXT: vpextrw $4, %xmm1, %r8d
+; XOP-NEXT: vpextrw $5, %xmm1, %r9d
+; XOP-NEXT: vpextrw $6, %xmm1, %r10d
+; XOP-NEXT: vpextrw $7, %xmm1, %eax
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: andl $7, %ecx
+; XOP-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; XOP-NEXT: andl $7, %edx
+; XOP-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; XOP-NEXT: andl $7, %esi
+; XOP-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; XOP-NEXT: andl $7, %edi
+; XOP-NEXT: movzwl -24(%rsp,%rdi,2), %edi
+; XOP-NEXT: andl $7, %r8d
+; XOP-NEXT: movzwl -24(%rsp,%r8,2), %r8d
+; XOP-NEXT: andl $7, %r9d
+; XOP-NEXT: movzwl -24(%rsp,%r9,2), %r9d
+; XOP-NEXT: andl $7, %r10d
+; XOP-NEXT: movzwl -24(%rsp,%r10,2), %r10d
+; XOP-NEXT: andl $7, %eax
+; XOP-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; XOP-NEXT: vmovd %ecx, %xmm0
+; XOP-NEXT: vpinsrw $1, %edx, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $2, %esi, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $3, %edi, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $6, %r10d, %xmm0, %xmm0
+; XOP-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v8i16:
@@ -581,10 +758,40 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw
; AVX1-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [514,514,514,514,514,514,514,514]
-; AVX1-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm1, %ecx
+; AVX1-NEXT: vpextrw $1, %xmm1, %edx
+; AVX1-NEXT: vpextrw $2, %xmm1, %esi
+; AVX1-NEXT: vpextrw $3, %xmm1, %edi
+; AVX1-NEXT: vpextrw $4, %xmm1, %r8d
+; AVX1-NEXT: vpextrw $5, %xmm1, %r9d
+; AVX1-NEXT: vpextrw $6, %xmm1, %r10d
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: andl $7, %ecx
+; AVX1-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; AVX1-NEXT: andl $7, %edx
+; AVX1-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; AVX1-NEXT: andl $7, %esi
+; AVX1-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; AVX1-NEXT: andl $7, %edi
+; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %edi
+; AVX1-NEXT: andl $7, %r8d
+; AVX1-NEXT: movzwl -24(%rsp,%r8,2), %r8d
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: movzwl -24(%rsp,%r9,2), %r9d
+; AVX1-NEXT: andl $7, %r10d
+; AVX1-NEXT: movzwl -24(%rsp,%r10,2), %r10d
+; AVX1-NEXT: andl $7, %eax
+; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; AVX1-NEXT: vmovd %ecx, %xmm0
+; AVX1-NEXT: vpinsrw $1, %edx, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $2, %esi, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $3, %edi, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $6, %r10d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v8i16:
@@ -592,10 +799,40 @@ define <8 x i16> @var_shuffle_zero_v8i16(<8 x i16> %v, <8 x i16> %indices) nounw
; AVX2-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [514,514,514,514,514,514,514,514]
-; AVX2-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpor %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $0, %xmm1, %ecx
+; AVX2-NEXT: vpextrw $1, %xmm1, %edx
+; AVX2-NEXT: vpextrw $2, %xmm1, %esi
+; AVX2-NEXT: vpextrw $3, %xmm1, %edi
+; AVX2-NEXT: vpextrw $4, %xmm1, %r8d
+; AVX2-NEXT: vpextrw $5, %xmm1, %r9d
+; AVX2-NEXT: vpextrw $6, %xmm1, %r10d
+; AVX2-NEXT: vpextrw $7, %xmm1, %eax
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: andl $7, %ecx
+; AVX2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; AVX2-NEXT: andl $7, %edx
+; AVX2-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; AVX2-NEXT: andl $7, %esi
+; AVX2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
+; AVX2-NEXT: andl $7, %edi
+; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %edi
+; AVX2-NEXT: andl $7, %r8d
+; AVX2-NEXT: movzwl -24(%rsp,%r8,2), %r8d
+; AVX2-NEXT: andl $7, %r9d
+; AVX2-NEXT: movzwl -24(%rsp,%r9,2), %r9d
+; AVX2-NEXT: andl $7, %r10d
+; AVX2-NEXT: movzwl -24(%rsp,%r10,2), %r10d
+; AVX2-NEXT: andl $7, %eax
+; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; AVX2-NEXT: vmovd %ecx, %xmm0
+; AVX2-NEXT: vpinsrw $1, %edx, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $2, %esi, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $3, %edi, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $4, %r8d, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $5, %r9d, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $6, %r10d, %xmm0, %xmm0
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_zero_v8i16:
@@ -882,27 +1119,240 @@ define <16 x i8> @var_shuffle_zero_v16i8(<16 x i8> %v, <16 x i8> %indices) nounw
;
; SSSE3-LABEL: var_shuffle_zero_v16i8:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSSE3-NEXT: pmaxub %xmm1, %xmm2
-; SSSE3-NEXT: pcmpeqb %xmm1, %xmm2
-; SSSE3-NEXT: por %xmm1, %xmm2
-; SSSE3-NEXT: pshufb %xmm2, %xmm0
+; SSSE3-NEXT: movaps %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; SSSE3-NEXT: pmaxub %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqb %xmm1, %xmm0
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, -40(%rsp)
+; SSSE3-NEXT: movaps %xmm2, -24(%rsp)
+; SSSE3-NEXT: movzbl -25(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: movzbl -26(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: movzbl -27(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm4
+; SSSE3-NEXT: movzbl -28(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: movzbl -29(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm6
+; SSSE3-NEXT: movzbl -30(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm7
+; SSSE3-NEXT: movzbl -31(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm8
+; SSSE3-NEXT: movzbl -32(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm5
+; SSSE3-NEXT: movzbl -33(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm9
+; SSSE3-NEXT: movzbl -34(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm10
+; SSSE3-NEXT: movzbl -35(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm12
+; SSSE3-NEXT: movzbl -36(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm11
+; SSSE3-NEXT: movzbl -37(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm13
+; SSSE3-NEXT: movzbl -38(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm14
+; SSSE3-NEXT: movzbl -39(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm15
+; SSSE3-NEXT: movzbl -40(%rsp), %eax
+; SSSE3-NEXT: andl $15, %eax
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; SSSE3-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3],xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; SSSE3-NEXT: pandn %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_zero_v16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; SSE41-NEXT: pmaxub %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqb %xmm1, %xmm2
-; SSE41-NEXT: por %xmm1, %xmm2
-; SSE41-NEXT: pshufb %xmm2, %xmm0
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; SSE41-NEXT: pmaxub %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqb %xmm1, %xmm0
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: pextrb $0, %xmm1, %eax
+; SSE41-NEXT: movaps %xmm2, -24(%rsp)
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: movd %eax, %xmm2
+; SSE41-NEXT: pextrb $1, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $1, %eax, %xmm2
+; SSE41-NEXT: pextrb $2, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $2, %eax, %xmm2
+; SSE41-NEXT: pextrb $3, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $3, %eax, %xmm2
+; SSE41-NEXT: pextrb $4, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $4, %eax, %xmm2
+; SSE41-NEXT: pextrb $5, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $5, %eax, %xmm2
+; SSE41-NEXT: pextrb $6, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $6, %eax, %xmm2
+; SSE41-NEXT: pextrb $7, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $7, %eax, %xmm2
+; SSE41-NEXT: pextrb $8, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $8, %eax, %xmm2
+; SSE41-NEXT: pextrb $9, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $9, %eax, %xmm2
+; SSE41-NEXT: pextrb $10, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $10, %eax, %xmm2
+; SSE41-NEXT: pextrb $11, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $11, %eax, %xmm2
+; SSE41-NEXT: pextrb $12, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $12, %eax, %xmm2
+; SSE41-NEXT: pextrb $13, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $13, %eax, %xmm2
+; SSE41-NEXT: pextrb $14, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $14, %eax, %xmm2
+; SSE41-NEXT: pextrb $15, %xmm1, %eax
+; SSE41-NEXT: andl $15, %eax
+; SSE41-NEXT: movzbl -24(%rsp,%rax), %eax
+; SSE41-NEXT: pinsrb $15, %eax, %xmm2
+; SSE41-NEXT: pandn %xmm2, %xmm0
; SSE41-NEXT: retq
;
; XOP-LABEL: var_shuffle_zero_v16i8:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $0, %xmm1, %eax
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vmovd %eax, %xmm0
+; XOP-NEXT: vpextrb $1, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $2, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $3, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $4, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $5, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $6, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $7, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $8, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $9, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $10, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $11, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $12, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $13, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $14, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $15, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzbl -24(%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v16i8:
@@ -910,7 +1360,72 @@ define <16 x i8> @var_shuffle_zero_v16i8(<16 x i8> %v, <16 x i8> %indices) nounw
; AVX1-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v16i8:
@@ -918,7 +1433,72 @@ define <16 x i8> @var_shuffle_zero_v16i8(<16 x i8> %v, <16 x i8> %indices) nounw
; AVX2-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzbl -24(%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: var_shuffle_zero_v16i8:
@@ -1053,8 +1633,9 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices)
; SSE3-NEXT: movq %xmm1, %rcx
; SSE3-NEXT: andl $1, %ecx
; SSE3-NEXT: movaps %xmm0, -24(%rsp)
-; SSE3-NEXT: movsd -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
-; SSE3-NEXT: movhps -24(%rsp,%rcx,8), %xmm0 # xmm0 = xmm0[0,1],mem[0,1]
+; SSE3-NEXT: movq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; SSE3-NEXT: movq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE3-NEXT: pandn %xmm0, %xmm2
; SSE3-NEXT: movdqa %xmm2, %xmm0
; SSE3-NEXT: retq
@@ -1077,40 +1658,49 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices)
; SSSE3-NEXT: movq %xmm1, %rcx
; SSSE3-NEXT: andl $1, %ecx
; SSSE3-NEXT: movaps %xmm0, -24(%rsp)
-; SSSE3-NEXT: movsd -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
-; SSSE3-NEXT: movhps -24(%rsp,%rcx,8), %xmm0 # xmm0 = xmm0[0,1],mem[0,1]
+; SSSE3-NEXT: movq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; SSSE3-NEXT: movq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: pandn %xmm0, %xmm2
; SSSE3-NEXT: movdqa %xmm2, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_zero_v2f64:
; SSE41: # %bb.0:
-; SSE41-NEXT: movapd %xmm0, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
-; SSE41-NEXT: pxor %xmm1, %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,2,2]
-; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; SSE41-NEXT: pand %xmm3, %xmm4
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE41-NEXT: por %xmm4, %xmm3
-; SSE41-NEXT: por %xmm3, %xmm1
-; SSE41-NEXT: pxor %xmm0, %xmm0
-; SSE41-NEXT: pcmpeqq %xmm1, %xmm0
-; SSE41-NEXT: movddup {{.*#+}} xmm1 = xmm2[0,0]
-; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
-; SSE41-NEXT: blendvpd %xmm0, %xmm1, %xmm2
-; SSE41-NEXT: pandn %xmm2, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT: pxor %xmm1, %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; SSE41-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT: pcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE41-NEXT: pand %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT: por %xmm3, %xmm2
+; SSE41-NEXT: por %xmm2, %xmm1
+; SSE41-NEXT: movq %xmm1, %rax
+; SSE41-NEXT: andl $1, %eax
+; SSE41-NEXT: pextrq $1, %xmm1, %rcx
+; SSE41-NEXT: andl $1, %ecx
+; SSE41-NEXT: movaps %xmm0, -24(%rsp)
+; SSE41-NEXT: movq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; SSE41-NEXT: movq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT: pandn %xmm0, %xmm2
+; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; XOP-LABEL: var_shuffle_zero_v2f64:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vmovq %xmm1, %rax
+; XOP-NEXT: andl $1, %eax
+; XOP-NEXT: vpextrq $1, %xmm1, %rcx
+; XOP-NEXT: andl $1, %ecx
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: vmovq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; XOP-NEXT: vmovq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; XOP-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
@@ -1119,8 +1709,14 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices)
; AVX1-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX1-NEXT: andl $1, %ecx
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: vmovq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; AVX1-NEXT: vmovq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
@@ -1129,8 +1725,14 @@ define <2 x double> @var_shuffle_zero_v2f64(<2 x double> %v, <2 x i64> %indices)
; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX2-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpermilpd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: andl $1, %ecx
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: vmovq -24(%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq -24(%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
@@ -1251,54 +1853,94 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n
; SSE3-NEXT: movd %xmm1, %esi
; SSE3-NEXT: movaps %xmm2, -24(%rsp)
; SSE3-NEXT: andl $3, %eax
+; SSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
; SSE3-NEXT: andl $3, %ecx
+; SSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
; SSE3-NEXT: andl $3, %edx
+; SSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
; SSE3-NEXT: andl $3, %esi
-; SSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
-; SSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
-; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
-; SSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
-; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; SSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE3-NEXT: pandn %xmm1, %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_zero_v4f32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: pxor %xmm1, %xmm2
-; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [67372036,67372036,67372036,67372036]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm3, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: pmuludq %xmm3, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; SSSE3-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSSE3-NEXT: por %xmm2, %xmm1
-; SSSE3-NEXT: pshufb %xmm1, %xmm0
+; SSSE3-NEXT: movaps %xmm0, %xmm2
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSSE3-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSSE3-NEXT: por %xmm0, %xmm1
+; SSSE3-NEXT: movd %xmm1, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
+; SSSE3-NEXT: movd %xmm3, %ecx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
+; SSSE3-NEXT: movd %xmm3, %edx
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; SSSE3-NEXT: movd %xmm1, %esi
+; SSSE3-NEXT: movaps %xmm2, -24(%rsp)
+; SSSE3-NEXT: andl $3, %eax
+; SSSE3-NEXT: movd -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: andl $3, %ecx
+; SSSE3-NEXT: movd -24(%rsp,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: andl $3, %edx
+; SSSE3-NEXT: movd -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; SSSE3-NEXT: andl $3, %esi
+; SSSE3-NEXT: movd -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; SSSE3-NEXT: pandn %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_zero_v4f32:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovsxbd {{.*#+}} xmm2 = [4,4,4,4]
-; SSE41-NEXT: pmaxud %xmm1, %xmm2
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm2
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: por %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: pmovsxbd {{.*#+}} xmm0 = [4,4,4,4]
+; SSE41-NEXT: pmaxud %xmm1, %xmm0
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE41-NEXT: por %xmm0, %xmm1
+; SSE41-NEXT: movd %xmm1, %eax
+; SSE41-NEXT: pextrd $1, %xmm1, %ecx
+; SSE41-NEXT: pextrd $2, %xmm1, %edx
+; SSE41-NEXT: pextrd $3, %xmm1, %esi
+; SSE41-NEXT: movaps %xmm2, -24(%rsp)
+; SSE41-NEXT: andl $3, %eax
+; SSE41-NEXT: movss -24(%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; SSE41-NEXT: andl $3, %ecx
+; SSE41-NEXT: movss -24(%rsp,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
+; SSE41-NEXT: andl $3, %edx
+; SSE41-NEXT: movss -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; SSE41-NEXT: andl $3, %esi
+; SSE41-NEXT: movss -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; SSE41-NEXT: pandn %xmm1, %xmm0
; SSE41-NEXT: retq
;
; XOP-LABEL: var_shuffle_zero_v4f32:
; XOP: # %bb.0:
; XOP-NEXT: vpcomgtud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; XOP-NEXT: vpor %xmm1, %xmm2, %xmm1
-; XOP-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; XOP-NEXT: vmovd %xmm1, %eax
+; XOP-NEXT: vpextrd $1, %xmm1, %ecx
+; XOP-NEXT: vpextrd $2, %xmm1, %edx
+; XOP-NEXT: vpextrd $3, %xmm1, %esi
+; XOP-NEXT: vmovaps %xmm0, -24(%rsp)
+; XOP-NEXT: andl $3, %eax
+; XOP-NEXT: vmovss -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $3, %ecx
+; XOP-NEXT: vmovss -24(%rsp,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $3, %edx
+; XOP-NEXT: vmovss -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $3, %esi
+; XOP-NEXT: vmovss -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; XOP-NEXT: vpandn %xmm0, %xmm2, %xmm0
; XOP-NEXT: retq
;
@@ -1307,7 +1949,22 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n
; AVX1-NEXT: vpmaxud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm2
; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $2, %xmm1, %edx
+; AVX1-NEXT: vpextrd $3, %xmm1, %esi
+; AVX1-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: vmovss -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $3, %ecx
+; AVX1-NEXT: vmovss -24(%rsp,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: vmovss -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: vmovss -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; AVX1-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
@@ -1317,7 +1974,22 @@ define <4 x float> @var_shuffle_zero_v4f32(<4 x float> %v, <4 x i32> %indices) n
; AVX2-NEXT: vpmaxud %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm2
; AVX2-NEXT: vpor %xmm1, %xmm2, %xmm1
-; AVX2-NEXT: vpermilps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm1, %edx
+; AVX2-NEXT: vpextrd $3, %xmm1, %esi
+; AVX2-NEXT: vmovaps %xmm0, -24(%rsp)
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: vmovss -24(%rsp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: vmovss -24(%rsp,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: vmovss -24(%rsp,%rdx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: vmovss -24(%rsp,%rsi,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; AVX2-NEXT: vpandn %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
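
The test churn in these files follows from the poison semantics that motivate the freeze: select only propagates poison from the lane it actually chooses, while or/and propagate poison from either operand unconditionally, so folding select into a plain bitwise op needs the non-selected operand frozen. A minimal LLVM IR sketch of the per-lane reasoning (illustrative scalar values, not taken from the tests):

; With a well-defined true condition, select never reads the false arm:
;   select i1 true, i32 -1, i32 poison   ; well-defined: yields -1
; The naive fold is unsound, since OR propagates poison from either side:
;   or i32 -1, i32 poison                ; poison
; Freezing the operand first pins it to some arbitrary fixed value,
; making the fold sound:
;   %fx = freeze i32 poison              ; arbitrary but fixed value
;   or i32 -1, i32 %fx                   ; yields -1

The var_shuffle_zero_* regressions below presumably arise because the freshly frozen operand no longer matches the shuffle patterns the target lowering previously recognized, so these cases fall back to scalarized stack-based lowering.
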
diff --git a/llvm/test/CodeGen/X86/var-permute-256.ll b/llvm/test/CodeGen/X86/var-permute-256.ll
index 283c6a303a581..52cd42ba463cc 100644
--- a/llvm/test/CodeGen/X86/var-permute-256.ll
+++ b/llvm/test/CodeGen/X86/var-permute-256.ll
@@ -81,24 +81,44 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
define <4 x i64> @var_shuffle_zero_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v4i64:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [3,3]
; XOP-NEXT: vpcomgtuq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpcomgtuq %xmm3, %xmm1, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm3
+; XOP-NEXT: vmovq %xmm1, %rax
+; XOP-NEXT: andl $3, %eax
+; XOP-NEXT: vpextrq $1, %xmm1, %rcx
+; XOP-NEXT: andl $3, %ecx
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; XOP-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm3, %ymm0, %ymm0
+; XOP-NEXT: vmovq %xmm1, %rdx
+; XOP-NEXT: andl $3, %edx
+; XOP-NEXT: vpextrq $1, %xmm1, %rsi
+; XOP-NEXT: andl $3, %esi
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: vmovsd (%rsp,%rsi,8), %xmm0 # xmm0 = mem[0],zero
+; XOP-NEXT: vmovsd (%rsp,%rdx,8), %xmm1 # xmm1 = mem[0],zero
+; XOP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; XOP-NEXT: vmovsd (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; XOP-NEXT: vmovsd (%rsp,%rax,8), %xmm3 # xmm3 = mem[0],zero
+; XOP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v4i64:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: # xmm3 = mem[0,0]
@@ -110,37 +130,59 @@ define <4 x i64> @var_shuffle_zero_v4i64(<4 x i64> %v, <4 x i64> %indices) nounw
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX1-NEXT: andl $3, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX1-NEXT: vpermilpd %ymm1, %ymm3, %ymm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vblendvpd %ymm4, %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: vmovsd (%rsp,%rsi,8), %xmm0 # xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd (%rsp,%rdx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovsd (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd (%rsp,%rax,8), %xmm3 # xmm3 = mem[0],zero
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v4i64:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775811,9223372036854775811,9223372036854775811,9223372036854775811]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [2,2,2,2]
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm3
-; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpermilpd %ymm1, %ymm4, %ymm4
-; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vblendvpd %ymm3, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: vmovq (%rsp,%rsi,8), %xmm0 # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq (%rsp,%rdx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-NEXT: vmovq (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX2-NEXT: vmovq (%rsp,%rax,8), %xmm3 # xmm3 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_zero_v4i64:
@@ -238,20 +280,54 @@ define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
define <8 x i32> @var_shuffle_zero_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v8i32:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm3 = [7,7,7,7]
; XOP-NEXT: vpcomgtud %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpcomgtud %xmm3, %xmm1, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOP-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm3, %ymm0, %ymm0
+; XOP-NEXT: vmovd %xmm1, %eax
+; XOP-NEXT: vpextrd $1, %xmm1, %ecx
+; XOP-NEXT: vpextrd $2, %xmm1, %edx
+; XOP-NEXT: vpextrd $3, %xmm1, %esi
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
+; XOP-NEXT: vmovd %xmm1, %edi
+; XOP-NEXT: vpextrd $1, %xmm1, %r8d
+; XOP-NEXT: vpextrd $2, %xmm1, %r9d
+; XOP-NEXT: vpextrd $3, %xmm1, %r10d
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: andl $7, %eax
+; XOP-NEXT: andl $7, %ecx
+; XOP-NEXT: andl $7, %edx
+; XOP-NEXT: andl $7, %esi
+; XOP-NEXT: andl $7, %edi
+; XOP-NEXT: andl $7, %r8d
+; XOP-NEXT: andl $7, %r9d
+; XOP-NEXT: andl $7, %r10d
+; XOP-NEXT: vmovd (%rsp,%rdi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; XOP-NEXT: vpinsrd $1, (%rsp,%r8,4), %xmm0, %xmm0
+; XOP-NEXT: vpinsrd $2, (%rsp,%r9,4), %xmm0, %xmm0
+; XOP-NEXT: vpinsrd $3, (%rsp,%r10,4), %xmm0, %xmm0
+; XOP-NEXT: vmovd (%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; XOP-NEXT: vpinsrd $1, (%rsp,%rcx,4), %xmm1, %xmm1
+; XOP-NEXT: vpinsrd $2, (%rsp,%rdx,4), %xmm1, %xmm1
+; XOP-NEXT: vpinsrd $3, (%rsp,%rsi,4), %xmm1, %xmm1
+; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v8i32:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [8,8,8,8]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm4
@@ -260,26 +336,78 @@ define <8 x i32> @var_shuffle_zero_v8i32(<8 x i32> %v, <8 x i32> %indices) nounw
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX1-NEXT: vpermilps %ymm1, %ymm4, %ymm4
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vblendvps %ymm3, %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $2, %xmm1, %edx
+; AVX1-NEXT: vpextrd $3, %xmm1, %esi
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %edi
+; AVX1-NEXT: vpextrd $1, %xmm1, %r8d
+; AVX1-NEXT: vpextrd $2, %xmm1, %r9d
+; AVX1-NEXT: vpextrd $3, %xmm1, %r10d
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $7, %eax
+; AVX1-NEXT: andl $7, %ecx
+; AVX1-NEXT: andl $7, %edx
+; AVX1-NEXT: andl $7, %esi
+; AVX1-NEXT: andl $7, %edi
+; AVX1-NEXT: andl $7, %r8d
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: andl $7, %r10d
+; AVX1-NEXT: vmovd (%rsp,%rdi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: vpinsrd $1, (%rsp,%r8,4), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $2, (%rsp,%r9,4), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $3, (%rsp,%r10,4), %xmm0, %xmm0
+; AVX1-NEXT: vmovd (%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vpinsrd $1, (%rsp,%rcx,4), %xmm1, %xmm1
+; AVX1-NEXT: vpinsrd $2, (%rsp,%rdx,4), %xmm1, %xmm1
+; AVX1-NEXT: vpinsrd $3, (%rsp,%rsi,4), %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v8i32:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpmaxud %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovd %xmm1, %eax
+; AVX2-NEXT: vpextrd $1, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $2, %xmm1, %edx
+; AVX2-NEXT: vpextrd $3, %xmm1, %esi
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %edi
+; AVX2-NEXT: vpextrd $1, %xmm1, %r8d
+; AVX2-NEXT: vpextrd $2, %xmm1, %r9d
+; AVX2-NEXT: vpextrd $3, %xmm1, %r10d
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: andl $7, %eax
+; AVX2-NEXT: andl $7, %ecx
+; AVX2-NEXT: andl $7, %edx
+; AVX2-NEXT: andl $7, %esi
+; AVX2-NEXT: andl $7, %edi
+; AVX2-NEXT: andl $7, %r8d
+; AVX2-NEXT: andl $7, %r9d
+; AVX2-NEXT: andl $7, %r10d
+; AVX2-NEXT: vmovd (%rsp,%rdi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: vpinsrd $1, (%rsp,%r8,4), %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $2, (%rsp,%r9,4), %xmm0, %xmm0
+; AVX2-NEXT: vpinsrd $3, (%rsp,%r10,4), %xmm0, %xmm0
+; AVX2-NEXT: vmovd (%rsp,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: vpinsrd $1, (%rsp,%rcx,4), %xmm1, %xmm1
+; AVX2-NEXT: vpinsrd $2, (%rsp,%rdx,4), %xmm1, %xmm1
+; AVX2-NEXT: vpinsrd $3, (%rsp,%rsi,4), %xmm1, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_zero_v8i32:
@@ -468,26 +596,94 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
define <16 x i16> @var_shuffle_zero_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v16i16:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
; XOP-NEXT: vpcomgtuw %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpcomgtuw %xmm3, %xmm1, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOP-NEXT: vbroadcastss {{.*#+}} xmm3 = [256,256,256,256,256,256,256,256]
-; XOP-NEXT: vbroadcastss {{.*#+}} xmm4 = [514,514,514,514,514,514,514,514]
-; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm1, %xmm5
-; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
-; XOP-NEXT: vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3
-; XOP-NEXT: vpperm %xmm1, %xmm3, %xmm0, %xmm1
-; XOP-NEXT: vpperm %xmm5, %xmm3, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
+; XOP-NEXT: vpextrw $0, %xmm3, %eax
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vmovd %eax, %xmm0
+; XOP-NEXT: vpextrw $1, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $2, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $3, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $4, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $5, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $6, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $7, %xmm3, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrw $0, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vmovd %eax, %xmm3
+; XOP-NEXT: vpextrw $1, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $2, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $3, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $4, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $5, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $6, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrw $7, %xmm1, %eax
+; XOP-NEXT: andl $15, %eax
+; XOP-NEXT: movzwl (%rsp,%rax,2), %eax
+; XOP-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
+; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v16i16:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX1-NEXT: vpmaxuw %xmm3, %xmm2, %xmm4
@@ -496,57 +692,238 @@ define <16 x i16> @var_shuffle_zero_v16i16(<16 x i16> %v, <16 x i16> %indices) n
; AVX1-NEXT: vpcmpeqw %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [514,514,514,514,514,514,514,514]
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm4
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm5 = [256,256,256,256,256,256,256,256]
-; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpaddw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpcmpgtb %xmm3, %xmm1, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm7
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtb %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm5
-; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX1-NEXT: vpblendvb %xmm3, %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT: vpextrw $0, %xmm3, %eax
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpextrw $1, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $2, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $3, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $4, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $5, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $6, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $7, %xmm3, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $0, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $2, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $3, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $4, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $5, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $6, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v16i16:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [514,514,514,514,514,514,514,514,514,514,514,514,514,514,514,514]
-; AVX2-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpextrw $0, %xmm3, %eax
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpextrw $1, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $2, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $3, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $4, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $5, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $6, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $7, %xmm3, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $0, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vpextrw $1, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $2, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $3, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $4, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $5, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $6, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrw $7, %xmm1, %eax
+; AVX2-NEXT: andl $15, %eax
+; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX2-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_zero_v16i16:
; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX512F-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm2
; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [514,514,514,514,514,514,514,514,514,514,514,514,514,514,514,514]
-; AVX512F-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX512F-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512F-NEXT: vpextrw $0, %xmm3, %eax
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vpextrw $1, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $2, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $3, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $4, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $5, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $6, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrw $0, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vmovd %eax, %xmm3
+; AVX512F-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512F-NEXT: andl $15, %eax
+; AVX512F-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512F-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_zero_v16i16:
@@ -571,18 +948,83 @@ define <16 x i16> @var_shuffle_zero_v16i16(<16 x i16> %v, <16 x i16> %indices) n
;
; AVX512VLF-LABEL: var_shuffle_zero_v16i16:
; AVX512VLF: # %bb.0:
+; AVX512VLF-NEXT: pushq %rbp
+; AVX512VLF-NEXT: movq %rsp, %rbp
+; AVX512VLF-NEXT: andq $-32, %rsp
+; AVX512VLF-NEXT: subq $64, %rsp
; AVX512VLF-NEXT: vpmaxuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX512VLF-NEXT: vpcmpeqw %ymm2, %ymm1, %ymm2
; AVX512VLF-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512VLF-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [514,514,514,514,514,514,514,514,514,514,514,514,514,514,514,514]
-; AVX512VLF-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLF-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX512VLF-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512VLF-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512VLF-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512VLF-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLF-NEXT: vpternlogq {{.*#+}} ymm1 = ymm3 ^ (ymm1 & (ymm0 ^ ymm3))
-; AVX512VLF-NEXT: vpandn %ymm1, %ymm2, %ymm0
+; AVX512VLF-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512VLF-NEXT: vpextrw $0, %xmm3, %eax
+; AVX512VLF-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vmovd %eax, %xmm0
+; AVX512VLF-NEXT: vpextrw $1, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $2, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $3, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $4, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $5, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $6, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrw $0, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vmovd %eax, %xmm3
+; AVX512VLF-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VLF-NEXT: andl $15, %eax
+; AVX512VLF-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512VLF-NEXT: vpinsrw $7, %eax, %xmm3, %xmm1
+; AVX512VLF-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512VLF-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX512VLF-NEXT: movq %rbp, %rsp
+; AVX512VLF-NEXT: popq %rbp
; AVX512VLF-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_zero_v16i16:
@@ -830,6 +1272,10 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
define <32 x i8> @var_shuffle_zero_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v32i8:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm3 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; XOP-NEXT: vpcomgtub %xmm3, %xmm2, %xmm2
@@ -837,15 +1283,147 @@ define <32 x i8> @var_shuffle_zero_v32i8(<32 x i8> %v, <32 x i8> %indices) nounw
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm3
-; XOP-NEXT: vextractf128 $1, %ymm0, %xmm4
-; XOP-NEXT: vpperm %xmm3, %xmm4, %xmm0, %xmm3
-; XOP-NEXT: vpperm %xmm1, %xmm4, %xmm0, %xmm0
-; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; XOP-NEXT: vpextrb $0, %xmm3, %eax
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vmovd %eax, %xmm0
+; XOP-NEXT: vpextrb $1, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $2, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $3, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $4, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $5, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $6, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $7, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $8, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $9, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $10, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $11, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $12, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $13, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $14, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $15, %xmm3, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; XOP-NEXT: vpextrb $0, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vmovd %eax, %xmm3
+; XOP-NEXT: vpextrb $1, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $2, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $3, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $4, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $5, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $6, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $7, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $8, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $9, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $10, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $11, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $12, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $13, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $14, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; XOP-NEXT: vpextrb $15, %xmm1, %eax
+; XOP-NEXT: andl $31, %eax
+; XOP-NEXT: movzbl (%rsp,%rax), %eax
+; XOP-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v32i8:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
; AVX1-NEXT: vpmaxub %xmm3, %xmm2, %xmm4
@@ -855,46 +1433,429 @@ define <32 x i8> @var_shuffle_zero_v32i8(<32 x i8> %v, <32 x i8> %indices) nounw
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpcmpgtb %xmm4, %xmm3, %xmm5
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm7
-; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm3
-; AVX1-NEXT: vpblendvb %xmm5, %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtb %xmm4, %xmm1, %xmm4
-; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm5
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpblendvb %xmm4, %xmm5, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT: vpextrb $0, %xmm3, %eax
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpextrb $1, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $2, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $3, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $4, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $5, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $6, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $7, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $8, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $9, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $10, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $11, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $12, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $13, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $14, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm3, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vmovd %eax, %xmm3
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rsp,%rax), %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v32i8:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX2-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpblendvb %ymm1, %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpextrb $0, %xmm3, %eax
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
+; AVX2-NEXT: vpextrb $1, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $2, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $3, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $4, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $5, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $6, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $7, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $8, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $9, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $10, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $11, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $12, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $13, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $14, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $15, %xmm3, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vmovd %eax, %xmm3
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andl $31, %eax
+; AVX2-NEXT: movzbl (%rsp,%rax), %eax
+; AVX2-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_shuffle_zero_v32i8:
; AVX512F: # %bb.0:
+; AVX512F-NEXT: pushq %rbp
+; AVX512F-NEXT: movq %rsp, %rbp
+; AVX512F-NEXT: andq $-32, %rsp
+; AVX512F-NEXT: subq $64, %rsp
; AVX512F-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX512F-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm2
; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX512F-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512F-NEXT: vpblendvb %ymm1, %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512F-NEXT: vpextrb $0, %xmm3, %eax
+; AVX512F-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vpextrb $1, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $2, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $3, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $4, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $5, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $6, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $7, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $8, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $9, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $10, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $11, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $12, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $13, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $14, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $15, %xmm3, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vpextrb $0, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vmovd %eax, %xmm3
+; AVX512F-NEXT: vpextrb $1, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $2, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $3, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $4, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $5, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $6, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $7, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $8, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $9, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $10, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $11, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $12, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $13, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $14, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpextrb $15, %xmm1, %eax
+; AVX512F-NEXT: andl $31, %eax
+; AVX512F-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: movq %rbp, %rsp
+; AVX512F-NEXT: popq %rbp
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_zero_v32i8:
@@ -917,16 +1878,147 @@ define <32 x i8> @var_shuffle_zero_v32i8(<32 x i8> %v, <32 x i8> %indices) nounw
;
; AVX512VLF-LABEL: var_shuffle_zero_v32i8:
; AVX512VLF: # %bb.0:
+; AVX512VLF-NEXT: pushq %rbp
+; AVX512VLF-NEXT: movq %rsp, %rbp
+; AVX512VLF-NEXT: andq $-32, %rsp
+; AVX512VLF-NEXT: subq $64, %rsp
; AVX512VLF-NEXT: vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
; AVX512VLF-NEXT: vpcmpeqb %ymm2, %ymm1, %ymm2
; AVX512VLF-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX512VLF-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm3
-; AVX512VLF-NEXT: vpshufb %ymm1, %ymm3, %ymm3
-; AVX512VLF-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512VLF-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512VLF-NEXT: vpcmpgtb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512VLF-NEXT: vpternlogq {{.*#+}} ymm1 = ymm3 ^ (ymm1 & (ymm0 ^ ymm3))
-; AVX512VLF-NEXT: vpandn %ymm1, %ymm2, %ymm0
+; AVX512VLF-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX512VLF-NEXT: vpextrb $0, %xmm3, %eax
+; AVX512VLF-NEXT: vmovaps %ymm0, (%rsp)
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vmovd %eax, %xmm0
+; AVX512VLF-NEXT: vpextrb $1, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $2, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $3, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $4, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $5, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $6, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $7, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $8, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $9, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $10, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $11, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $12, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $13, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $14, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $15, %xmm3, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512VLF-NEXT: vpextrb $0, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vmovd %eax, %xmm3
+; AVX512VLF-NEXT: vpextrb $1, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $2, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $3, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $4, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $5, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $6, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $7, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $8, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $9, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $10, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $11, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $12, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $13, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $14, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512VLF-NEXT: vpextrb $15, %xmm1, %eax
+; AVX512VLF-NEXT: andl $31, %eax
+; AVX512VLF-NEXT: movzbl (%rsp,%rax), %eax
+; AVX512VLF-NEXT: vpinsrb $15, %eax, %xmm3, %xmm1
+; AVX512VLF-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512VLF-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX512VLF-NEXT: movq %rbp, %rsp
+; AVX512VLF-NEXT: popq %rbp
; AVX512VLF-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_zero_v32i8:
@@ -1125,24 +2217,44 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
define <4 x double> @var_shuffle_zero_v4f64(<4 x double> %v, <4 x i64> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v4f64:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vpmovsxbq {{.*#+}} xmm3 = [3,3]
; XOP-NEXT: vpcomgtuq %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpcomgtuq %xmm3, %xmm1, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm3
+; XOP-NEXT: vmovq %xmm1, %rax
+; XOP-NEXT: andl $3, %eax
+; XOP-NEXT: vpextrq $1, %xmm1, %rcx
+; XOP-NEXT: andl $3, %ecx
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
-; XOP-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; XOP-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; XOP-NEXT: vpermil2pd $0, %ymm1, %ymm3, %ymm0, %ymm0
+; XOP-NEXT: vmovq %xmm1, %rdx
+; XOP-NEXT: andl $3, %edx
+; XOP-NEXT: vpextrq $1, %xmm1, %rsi
+; XOP-NEXT: andl $3, %esi
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: vmovsd (%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; XOP-NEXT: vmovsd (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; XOP-NEXT: vmovsd (%rsp,%rdx,8), %xmm3 # xmm3 = mem[0],zero
+; XOP-NEXT: vmovsd (%rsp,%rsi,8), %xmm4 # xmm4 = mem[0],zero
+; XOP-NEXT: vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; XOP-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v4f64:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
; AVX1-NEXT: # xmm3 = mem[0,0]
@@ -1154,37 +2266,59 @@ define <4 x double> @var_shuffle_zero_v4f64(<4 x double> %v, <4 x i64> %indices)
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm3
-; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX1-NEXT: andl $3, %ecx
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm1, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; AVX1-NEXT: vpermilpd %ymm1, %ymm3, %ymm3
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vblendvpd %ymm4, %ymm3, %ymm0, %ymm0
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: vmovsd (%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd (%rsp,%rdx,8), %xmm3 # xmm3 = mem[0],zero
+; AVX1-NEXT: vmovsd (%rsp,%rsi,8), %xmm4 # xmm4 = mem[0],zero
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v4f64:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775811,9223372036854775811,9223372036854775811,9223372036854775811]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpaddq %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [2,2,2,2]
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm3
-; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX2-NEXT: vpermilpd %ymm1, %ymm4, %ymm4
-; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX2-NEXT: vpermilpd %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vblendvpd %ymm3, %ymm4, %ymm0, %ymm0
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: andl $3, %eax
+; AVX2-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX2-NEXT: andl $3, %ecx
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rdx
+; AVX2-NEXT: andl $3, %edx
+; AVX2-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX2-NEXT: andl $3, %esi
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: vmovq (%rsp,%rax,8), %xmm0 # xmm0 = mem[0],zero
+; AVX2-NEXT: vmovq (%rsp,%rcx,8), %xmm1 # xmm1 = mem[0],zero
+; AVX2-NEXT: vmovq (%rsp,%rdx,8), %xmm3 # xmm3 = mem[0],zero
+; AVX2-NEXT: vmovq (%rsp,%rsi,8), %xmm4 # xmm4 = mem[0],zero
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_zero_v4f64:
@@ -1282,20 +2416,60 @@ define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwi
define <8 x float> @var_shuffle_zero_v8f32(<8 x float> %v, <8 x i32> %indices) nounwind {
; XOP-LABEL: var_shuffle_zero_v8f32:
; XOP: # %bb.0:
+; XOP-NEXT: pushq %rbp
+; XOP-NEXT: movq %rsp, %rbp
+; XOP-NEXT: andq $-32, %rsp
+; XOP-NEXT: subq $64, %rsp
; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2
; XOP-NEXT: vbroadcastss {{.*#+}} xmm3 = [7,7,7,7]
; XOP-NEXT: vpcomgtud %xmm3, %xmm2, %xmm2
; XOP-NEXT: vpcomgtud %xmm3, %xmm1, %xmm3
; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; XOP-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOP-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3,2,3]
-; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; XOP-NEXT: vpermil2ps $0, %ymm1, %ymm3, %ymm0, %ymm0
+; XOP-NEXT: vmovd %xmm1, %esi
+; XOP-NEXT: vpextrd $1, %xmm1, %edi
+; XOP-NEXT: vpextrd $2, %xmm1, %r8d
+; XOP-NEXT: vpextrd $3, %xmm1, %r9d
+; XOP-NEXT: vextractf128 $1, %ymm1, %xmm1
+; XOP-NEXT: vmovd %xmm1, %r10d
+; XOP-NEXT: vpextrd $1, %xmm1, %edx
+; XOP-NEXT: vpextrd $2, %xmm1, %ecx
+; XOP-NEXT: vpextrd $3, %xmm1, %eax
+; XOP-NEXT: vmovaps %ymm0, (%rsp)
+; XOP-NEXT: andl $7, %esi
+; XOP-NEXT: vmovss (%rsp,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %edi
+; XOP-NEXT: vmovss (%rsp,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %r8d
+; XOP-NEXT: vmovss (%rsp,%r8,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %r9d
+; XOP-NEXT: vmovss (%rsp,%r9,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %r10d
+; XOP-NEXT: vmovss (%rsp,%r10,4), %xmm5 # xmm5 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %edx
+; XOP-NEXT: vmovss (%rsp,%rdx,4), %xmm6 # xmm6 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %ecx
+; XOP-NEXT: vmovss (%rsp,%rcx,4), %xmm7 # xmm7 = mem[0],zero,zero,zero
+; XOP-NEXT: andl $7, %eax
+; XOP-NEXT: vmovss (%rsp,%rax,4), %xmm8 # xmm8 = mem[0],zero,zero,zero
+; XOP-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[0]
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; XOP-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
+; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; XOP-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; XOP-NEXT: movq %rbp, %rsp
+; XOP-NEXT: popq %rbp
; XOP-NEXT: retq
;
; AVX1-LABEL: var_shuffle_zero_v8f32:
; AVX1: # %bb.0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm3 = [8,8,8,8]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm4
@@ -1304,26 +2478,90 @@ define <8 x float> @var_shuffle_zero_v8f32(<8 x float> %v, <8 x i32> %indices) n
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}+16(%rip), %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
-; AVX1-NEXT: vpermilps %ymm1, %ymm4, %ymm4
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: vblendvps %ymm3, %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vmovd %xmm1, %esi
+; AVX1-NEXT: vpextrd $1, %xmm1, %edi
+; AVX1-NEXT: vpextrd $2, %xmm1, %r8d
+; AVX1-NEXT: vpextrd $3, %xmm1, %r9d
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovd %xmm1, %r10d
+; AVX1-NEXT: vpextrd $1, %xmm1, %edx
+; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX1-NEXT: vpextrd $3, %xmm1, %eax
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $7, %esi
+; AVX1-NEXT: vmovss (%rsp,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %edi
+; AVX1-NEXT: vmovss (%rsp,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %r8d
+; AVX1-NEXT: vmovss (%rsp,%r8,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: vmovss (%rsp,%r9,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %r10d
+; AVX1-NEXT: vmovss (%rsp,%r10,4), %xmm5 # xmm5 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %edx
+; AVX1-NEXT: vmovss (%rsp,%rdx,4), %xmm6 # xmm6 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %ecx
+; AVX1-NEXT: vmovss (%rsp,%rcx,4), %xmm7 # xmm7 = mem[0],zero,zero,zero
+; AVX1-NEXT: andl $7, %eax
+; AVX1-NEXT: vmovss (%rsp,%rax,4), %xmm8 # xmm8 = mem[0],zero,zero,zero
+; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_shuffle_zero_v8f32:
; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [8,8,8,8,8,8,8,8]
; AVX2-NEXT: vpmaxud %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
-; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vmovd %xmm1, %esi
+; AVX2-NEXT: vpextrd $1, %xmm1, %edi
+; AVX2-NEXT: vpextrd $2, %xmm1, %r8d
+; AVX2-NEXT: vpextrd $3, %xmm1, %r9d
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovd %xmm1, %r10d
+; AVX2-NEXT: vpextrd $1, %xmm1, %edx
+; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
+; AVX2-NEXT: vpextrd $3, %xmm1, %eax
+; AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; AVX2-NEXT: andl $7, %esi
+; AVX2-NEXT: vmovss (%rsp,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %edi
+; AVX2-NEXT: vmovss (%rsp,%rdi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %r8d
+; AVX2-NEXT: vmovss (%rsp,%r8,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %r9d
+; AVX2-NEXT: vmovss (%rsp,%r9,4), %xmm4 # xmm4 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %r10d
+; AVX2-NEXT: vmovss (%rsp,%r10,4), %xmm5 # xmm5 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %edx
+; AVX2-NEXT: vmovss (%rsp,%rdx,4), %xmm6 # xmm6 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %ecx
+; AVX2-NEXT: vmovss (%rsp,%rcx,4), %xmm7 # xmm7 = mem[0],zero,zero,zero
+; AVX2-NEXT: andl $7, %eax
+; AVX2-NEXT: vmovss (%rsp,%rax,4), %xmm8 # xmm8 = mem[0],zero,zero,zero
+; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
+; AVX2-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: var_shuffle_zero_v8f32:
diff --git a/llvm/test/CodeGen/X86/vector-bo-select.ll b/llvm/test/CodeGen/X86/vector-bo-select.ll
index 11e7fe85d0239..0e37e5a2c72c6 100644
--- a/llvm/test/CodeGen/X86/vector-bo-select.ll
+++ b/llvm/test/CodeGen/X86/vector-bo-select.ll
@@ -468,29 +468,29 @@ define <16 x float> @fsub_v16f32_swap(<16 x i1> %b, <16 x float> noundef %x, <16
;
; SSE42-LABEL: fsub_v16f32_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: subps %xmm6, %xmm1
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: subps %xmm0, %xmm1
; SSE42-NEXT: subps %xmm7, %xmm2
-; SSE42-NEXT: subps %xmm8, %xmm3
-; SSE42-NEXT: subps %xmm0, %xmm4
+; SSE42-NEXT: subps %xmm9, %xmm3
+; SSE42-NEXT: subps %xmm8, %xmm4
; SSE42-NEXT: movaps %xmm1, %xmm0
; SSE42-NEXT: movaps %xmm2, %xmm1
; SSE42-NEXT: movaps %xmm3, %xmm2
@@ -562,33 +562,32 @@ define <16 x float> @fsub_v16f32_commute_swap(<16 x i1> %b, <16 x float> noundef
;
; SSE42-LABEL: fsub_v16f32_commute_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: movaps %xmm2, %xmm8
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm2
-; SSE42-NEXT: psrad $31, %xmm2
-; SSE42-NEXT: pandn %xmm7, %xmm2
+; SSE42-NEXT: movaps %xmm3, %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm3
+; SSE42-NEXT: psrad $31, %xmm3
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: subps %xmm1, %xmm6
-; SSE42-NEXT: subps %xmm8, %xmm7
-; SSE42-NEXT: subps %xmm3, %xmm2
-; SSE42-NEXT: subps %xmm4, %xmm5
-; SSE42-NEXT: movaps %xmm6, %xmm0
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: subps %xmm1, %xmm0
+; SSE42-NEXT: subps %xmm2, %xmm7
+; SSE42-NEXT: subps %xmm8, %xmm9
+; SSE42-NEXT: subps %xmm4, %xmm3
; SSE42-NEXT: movaps %xmm7, %xmm1
-; SSE42-NEXT: movaps %xmm5, %xmm3
+; SSE42-NEXT: movaps %xmm9, %xmm2
; SSE42-NEXT: retq
;
; AVX2-LABEL: fsub_v16f32_commute_swap:
@@ -2407,29 +2406,29 @@ define <16 x i32> @sub_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i3
;
; SSE42-LABEL: sub_v16i32_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm0
-; SSE42-NEXT: psubd %xmm6, %xmm1
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: psubd %xmm0, %xmm1
; SSE42-NEXT: psubd %xmm7, %xmm2
-; SSE42-NEXT: psubd %xmm8, %xmm3
-; SSE42-NEXT: psubd %xmm0, %xmm4
+; SSE42-NEXT: psubd %xmm9, %xmm3
+; SSE42-NEXT: psubd %xmm8, %xmm4
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: movdqa %xmm2, %xmm1
; SSE42-NEXT: movdqa %xmm3, %xmm2
@@ -2501,33 +2500,32 @@ define <16 x i32> @sub_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x,
;
; SSE42-LABEL: sub_v16i32_commute_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa %xmm2, %xmm8
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm2
-; SSE42-NEXT: psrad $31, %xmm2
-; SSE42-NEXT: pandn %xmm7, %xmm2
+; SSE42-NEXT: movdqa %xmm3, %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm3
+; SSE42-NEXT: psrad $31, %xmm3
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: psubd %xmm1, %xmm6
-; SSE42-NEXT: psubd %xmm8, %xmm7
-; SSE42-NEXT: psubd %xmm3, %xmm2
-; SSE42-NEXT: psubd %xmm4, %xmm5
-; SSE42-NEXT: movdqa %xmm6, %xmm0
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: psubd %xmm1, %xmm0
+; SSE42-NEXT: psubd %xmm2, %xmm7
+; SSE42-NEXT: psubd %xmm8, %xmm9
+; SSE42-NEXT: psubd %xmm4, %xmm3
; SSE42-NEXT: movdqa %xmm7, %xmm1
-; SSE42-NEXT: movdqa %xmm5, %xmm3
+; SSE42-NEXT: movdqa %xmm9, %xmm2
; SSE42-NEXT: retq
;
; AVX2-LABEL: sub_v16i32_commute_swap:
@@ -3371,41 +3369,41 @@ define <16 x i32> @shl_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i3
;
; SSE42-LABEL: shl_v16i32_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pslld $23, %xmm6
-; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [1065353216,1065353216,1065353216,1065353216]
-; SSE42-NEXT: paddd %xmm9, %xmm6
-; SSE42-NEXT: cvttps2dq %xmm6, %xmm0
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: pslld $23, %xmm0
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; SSE42-NEXT: paddd %xmm5, %xmm0
+; SSE42-NEXT: cvttps2dq %xmm0, %xmm0
; SSE42-NEXT: pmulld %xmm1, %xmm0
; SSE42-NEXT: pslld $23, %xmm7
-; SSE42-NEXT: paddd %xmm9, %xmm7
+; SSE42-NEXT: paddd %xmm5, %xmm7
; SSE42-NEXT: cvttps2dq %xmm7, %xmm1
; SSE42-NEXT: pmulld %xmm2, %xmm1
-; SSE42-NEXT: pslld $23, %xmm8
-; SSE42-NEXT: paddd %xmm9, %xmm8
-; SSE42-NEXT: cvttps2dq %xmm8, %xmm2
+; SSE42-NEXT: pslld $23, %xmm9
+; SSE42-NEXT: paddd %xmm5, %xmm9
+; SSE42-NEXT: cvttps2dq %xmm9, %xmm2
; SSE42-NEXT: pmulld %xmm3, %xmm2
-; SSE42-NEXT: pslld $23, %xmm5
-; SSE42-NEXT: paddd %xmm9, %xmm5
-; SSE42-NEXT: cvttps2dq %xmm5, %xmm3
+; SSE42-NEXT: pslld $23, %xmm8
+; SSE42-NEXT: paddd %xmm5, %xmm8
+; SSE42-NEXT: cvttps2dq %xmm8, %xmm3
; SSE42-NEXT: pmulld %xmm4, %xmm3
; SSE42-NEXT: retq
;
@@ -3508,11 +3506,16 @@ define <16 x i32> @shl_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x,
;
; SSE42-LABEL: shl_v16i32_commute_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
@@ -3522,28 +3525,23 @@ define <16 x i32> @shl_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x,
; SSE42-NEXT: pslld $31, %xmm6
; SSE42-NEXT: psrad $31, %xmm6
; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
; SSE42-NEXT: pslld $23, %xmm1
-; SSE42-NEXT: movdqa {{.*#+}} xmm9 = [1065353216,1065353216,1065353216,1065353216]
-; SSE42-NEXT: paddd %xmm9, %xmm1
+; SSE42-NEXT: movdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
+; SSE42-NEXT: paddd %xmm5, %xmm1
; SSE42-NEXT: cvttps2dq %xmm1, %xmm0
; SSE42-NEXT: pmulld %xmm6, %xmm0
; SSE42-NEXT: pslld $23, %xmm2
-; SSE42-NEXT: paddd %xmm9, %xmm2
+; SSE42-NEXT: paddd %xmm5, %xmm2
; SSE42-NEXT: cvttps2dq %xmm2, %xmm1
; SSE42-NEXT: pmulld %xmm7, %xmm1
; SSE42-NEXT: pslld $23, %xmm3
-; SSE42-NEXT: paddd %xmm9, %xmm3
+; SSE42-NEXT: paddd %xmm5, %xmm3
; SSE42-NEXT: cvttps2dq %xmm3, %xmm2
-; SSE42-NEXT: pmulld %xmm8, %xmm2
+; SSE42-NEXT: pmulld %xmm9, %xmm2
; SSE42-NEXT: pslld $23, %xmm4
-; SSE42-NEXT: paddd %xmm9, %xmm4
+; SSE42-NEXT: paddd %xmm5, %xmm4
; SSE42-NEXT: cvttps2dq %xmm4, %xmm3
-; SSE42-NEXT: pmulld %xmm5, %xmm3
+; SSE42-NEXT: pmulld %xmm8, %xmm3
; SSE42-NEXT: retq
;
; AVX2-LABEL: shl_v16i32_commute_swap:
@@ -4078,85 +4076,85 @@ define <16 x i32> @lshr_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i
;
; SSE42-LABEL: lshr_v16i32_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm1, %xmm9
-; SSE42-NEXT: psrld %xmm0, %xmm9
-; SSE42-NEXT: pshufd {{.*#+}} xmm10 = xmm6[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm1, %xmm6
+; SSE42-NEXT: psrld %xmm5, %xmm6
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm11
-; SSE42-NEXT: psrld %xmm0, %xmm11
-; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm9[0,1,2,3],xmm11[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrld %xmm10, %xmm11
+; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm6[0,1,2,3],xmm11[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: psrld %xmm6, %xmm0
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrld %xmm6, %xmm1
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrld %xmm5, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm2, %xmm6
-; SSE42-NEXT: psrld %xmm1, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm2, %xmm5
+; SSE42-NEXT: psrld %xmm1, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm10
; SSE42-NEXT: psrld %xmm1, %xmm10
-; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm10[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm10[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm1
-; SSE42-NEXT: psrld %xmm6, %xmm1
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrld %xmm6, %xmm2
+; SSE42-NEXT: psrld %xmm5, %xmm1
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrld %xmm5, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3],xmm1[4,5],xmm10[6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm3, %xmm6
-; SSE42-NEXT: psrld %xmm2, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm3, %xmm9
-; SSE42-NEXT: psrld %xmm2, %xmm9
-; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm6[0,1,2,3],xmm9[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm3, %xmm5
+; SSE42-NEXT: psrld %xmm2, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm3, %xmm7
+; SSE42-NEXT: psrld %xmm2, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm3, %xmm2
-; SSE42-NEXT: psrld %xmm6, %xmm2
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrld %xmm6, %xmm3
+; SSE42-NEXT: psrld %xmm5, %xmm2
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrld %xmm5, %xmm3
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm4, %xmm6
-; SSE42-NEXT: psrld %xmm3, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm4, %xmm8
-; SSE42-NEXT: psrld %xmm3, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm6[0,1,2,3],xmm8[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm4, %xmm5
+; SSE42-NEXT: psrld %xmm3, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm6[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: psrld %xmm3, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm4, %xmm3
; SSE42-NEXT: psrld %xmm5, %xmm3
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrld %xmm5, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm8[2,3],xmm3[4,5],xmm8[6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5],xmm7[6,7]
; SSE42-NEXT: retq
;
; AVX2-LABEL: lshr_v16i32_swap:
@@ -4280,74 +4278,73 @@ define <16 x i32> @lshr_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x,
;
; SSE42-LABEL: lshr_v16i32_commute_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa %xmm3, %xmm10
-; SSE42-NEXT: movdqa %xmm2, %xmm9
-; SSE42-NEXT: movdqa %xmm1, %xmm8
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: movdqa %xmm3, %xmm8
+; SSE42-NEXT: movdqa %xmm2, %xmm10
+; SSE42-NEXT: movdqa %xmm1, %xmm9
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm3
+; SSE42-NEXT: psrad $31, %xmm3
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm2
; SSE42-NEXT: psrad $31, %xmm2
; SSE42-NEXT: pandn %xmm7, %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm1
; SSE42-NEXT: psrad $31, %xmm1
; SSE42-NEXT: pandn %xmm6, %xmm1
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
; SSE42-NEXT: pandn %xmm5, %xmm0
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm3
-; SSE42-NEXT: psrad $31, %xmm3
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm7
; SSE42-NEXT: psrld %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm11
; SSE42-NEXT: psrld %xmm6, %xmm11
; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm6
; SSE42-NEXT: psrld %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrld %xmm5, %xmm0
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: psrld %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm1, %xmm8
-; SSE42-NEXT: psrld %xmm6, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm1, %xmm9
+; SSE42-NEXT: psrld %xmm6, %xmm9
+; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm6
; SSE42-NEXT: psrld %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrld %xmm5, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3],xmm1[4,5],xmm8[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3],xmm1[4,5],xmm9[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm7
; SSE42-NEXT: psrld %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm2, %xmm8
-; SSE42-NEXT: psrld %xmm6, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm2, %xmm9
+; SSE42-NEXT: psrld %xmm6, %xmm9
+; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm6
; SSE42-NEXT: psrld %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrld %xmm5, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm8[2,3],xmm2[4,5],xmm8[6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm3, %xmm7
@@ -4929,85 +4926,85 @@ define <16 x i32> @ashr_v16i32_swap(<16 x i1> %b, <16 x i32> noundef %x, <16 x i
;
; SSE42-LABEL: ashr_v16i32_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm0[3,3,3,3]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm8
; SSE42-NEXT: psrad $31, %xmm8
-; SSE42-NEXT: pandn %xmm7, %xmm8
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm8
+; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm9
+; SSE42-NEXT: psrad $31, %xmm9
+; SSE42-NEXT: pandn %xmm7, %xmm9
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
; SSE42-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm7
; SSE42-NEXT: psrad $31, %xmm7
; SSE42-NEXT: pandn %xmm6, %xmm7
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm6
-; SSE42-NEXT: psrad $31, %xmm6
-; SSE42-NEXT: pandn %xmm5, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm5
-; SSE42-NEXT: psrad $31, %xmm5
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm5
-; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm6[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm1, %xmm9
-; SSE42-NEXT: psrad %xmm0, %xmm9
-; SSE42-NEXT: pshufd {{.*#+}} xmm10 = xmm6[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm0
+; SSE42-NEXT: psrad $31, %xmm0
+; SSE42-NEXT: pandn %xmm5, %xmm0
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm1, %xmm6
+; SSE42-NEXT: psrad %xmm5, %xmm6
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm10 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm11
-; SSE42-NEXT: psrad %xmm0, %xmm11
-; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm9[0,1,2,3],xmm11[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrad %xmm10, %xmm11
+; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm6[0,1,2,3],xmm11[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm0[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm0
; SSE42-NEXT: psrad %xmm6, %xmm0
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrad %xmm6, %xmm1
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrad %xmm5, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm2, %xmm6
-; SSE42-NEXT: psrad %xmm1, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm9 = xmm7[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm2, %xmm5
+; SSE42-NEXT: psrad %xmm1, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm7[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm1 = xmm6[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm10
; SSE42-NEXT: psrad %xmm1, %xmm10
-; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm6[0,1,2,3],xmm10[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm10[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm1
-; SSE42-NEXT: psrad %xmm6, %xmm1
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrad %xmm6, %xmm2
+; SSE42-NEXT: psrad %xmm5, %xmm1
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrad %xmm5, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm10[2,3],xmm1[4,5],xmm10[6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm3, %xmm6
-; SSE42-NEXT: psrad %xmm2, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm3, %xmm9
-; SSE42-NEXT: psrad %xmm2, %xmm9
-; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm6[0,1,2,3],xmm9[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm3, %xmm5
+; SSE42-NEXT: psrad %xmm2, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm9[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm3, %xmm7
+; SSE42-NEXT: psrad %xmm2, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm3, %xmm2
-; SSE42-NEXT: psrad %xmm6, %xmm2
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,1,1,1,4,5,6,7]
-; SSE42-NEXT: psrad %xmm6, %xmm3
+; SSE42-NEXT: psrad %xmm5, %xmm2
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: psrad %xmm5, %xmm3
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm4, %xmm6
-; SSE42-NEXT: psrad %xmm3, %xmm6
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm4, %xmm8
-; SSE42-NEXT: psrad %xmm3, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm6[0,1,2,3],xmm8[4,5,6,7]
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm7[2,3],xmm2[4,5],xmm7[6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm4, %xmm5
+; SSE42-NEXT: psrad %xmm3, %xmm5
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm6[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm4, %xmm7
+; SSE42-NEXT: psrad %xmm3, %xmm7
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm4, %xmm3
; SSE42-NEXT: psrad %xmm5, %xmm3
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm6[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrad %xmm5, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm8[2,3],xmm3[4,5],xmm8[6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5],xmm7[6,7]
; SSE42-NEXT: retq
;
; AVX2-LABEL: ashr_v16i32_swap:
@@ -5131,74 +5128,73 @@ define <16 x i32> @ashr_v16i32_commute_swap(<16 x i1> %b, <16 x i32> noundef %x,
;
; SSE42-LABEL: ashr_v16i32_commute_swap:
; SSE42: # %bb.0:
-; SSE42-NEXT: movdqa %xmm3, %xmm10
-; SSE42-NEXT: movdqa %xmm2, %xmm9
-; SSE42-NEXT: movdqa %xmm1, %xmm8
-; SSE42-NEXT: movdqa %xmm0, %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: movdqa %xmm3, %xmm8
+; SSE42-NEXT: movdqa %xmm2, %xmm10
+; SSE42-NEXT: movdqa %xmm1, %xmm9
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; SSE42-NEXT: pslld $31, %xmm3
+; SSE42-NEXT: psrad $31, %xmm3
+; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm2
; SSE42-NEXT: psrad $31, %xmm2
; SSE42-NEXT: pandn %xmm7, %xmm2
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm1
; SSE42-NEXT: psrad $31, %xmm1
; SSE42-NEXT: pandn %xmm6, %xmm1
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; SSE42-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; SSE42-NEXT: pslld $31, %xmm0
; SSE42-NEXT: psrad $31, %xmm0
; SSE42-NEXT: pandn %xmm5, %xmm0
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSE42-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
-; SSE42-NEXT: pslld $31, %xmm3
-; SSE42-NEXT: psrad $31, %xmm3
-; SSE42-NEXT: pandn {{[0-9]+}}(%rsp), %xmm3
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm7
; SSE42-NEXT: psrad %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm11
; SSE42-NEXT: psrad %xmm6, %xmm11
; SSE42-NEXT: pblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm0, %xmm6
; SSE42-NEXT: psrad %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrad %xmm5, %xmm0
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3],xmm0[4,5],xmm11[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,3,2,3]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm7
; SSE42-NEXT: psrad %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm1, %xmm8
-; SSE42-NEXT: psrad %xmm6, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm1, %xmm9
+; SSE42-NEXT: psrad %xmm6, %xmm9
+; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, %xmm6
; SSE42-NEXT: psrad %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm9[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrad %xmm5, %xmm1
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm8[2,3],xmm1[4,5],xmm8[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2,3],xmm1[4,5],xmm9[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm7
; SSE42-NEXT: psrad %xmm6, %xmm7
-; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm10[2,3,3,3,4,5,6,7]
-; SSE42-NEXT: movdqa %xmm2, %xmm8
-; SSE42-NEXT: psrad %xmm6, %xmm8
-; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm8[2,3,3,3,4,5,6,7]
+; SSE42-NEXT: movdqa %xmm2, %xmm9
+; SSE42-NEXT: psrad %xmm6, %xmm9
+; SSE42-NEXT: pblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,1,1,4,5,6,7]
; SSE42-NEXT: movdqa %xmm2, %xmm6
; SSE42-NEXT: psrad %xmm5, %xmm6
-; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[0,1,1,1,4,5,6,7]
+; SSE42-NEXT: pshuflw {{.*#+}} xmm5 = xmm8[0,1,1,1,4,5,6,7]
; SSE42-NEXT: psrad %xmm5, %xmm2
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm8[2,3],xmm2[4,5],xmm8[6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm9[2,3],xmm2[4,5],xmm9[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
; SSE42-NEXT: pshuflw {{.*#+}} xmm6 = xmm5[2,3,3,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm3, %xmm7