[llvm] r359378 - [X86][AVX] Merge mask select with shuffles across extract_subvector (PR40332)
Simon Pilgrim via llvm-commits
Author: rksimon
Date: Sat Apr 27 06:35:32 2019
New Revision: 359378
URL: http://llvm.org/viewvc/llvm-project?rev=359378&view=rev
Log:
[X86][AVX] Merge mask select with shuffles across extract_subvector (PR40332)
Fixes PR40332 in the limited case where we're selecting between a target shuffle and a zero vector.
We can extend this in the future to handle more opcodes and non-zero selections.
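For example, given IR like the following (a reduced form of the first partial_permute.ll test below; the function name is illustrative):

define <8 x i16> @zero_masked_shuffle(<16 x i16> %vec, <8 x i16> %mask) {
  ; cross-lane shuffle of the 256-bit source down to 8 elements
  %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 8, i32 6, i32 12, i32 4, i32 7, i32 9, i32 14, i32 8>
  ; keep the shuffled lanes whose mask element is zero, zero the rest
  %cmp = icmp eq <8 x i16> %mask, zeroinitializer
  %res = select <8 x i1> %cmp, <8 x i16> %shuf, <8 x i16> zeroinitializer
  ret <8 x i16> %res
}

we previously emitted vpermw followed by a separate zero-masked vmovdqu16. With this patch the i1 condition is widened with insert_subvector to match the shuffle's 16-element type, the zeroing select is folded into the wider vpermw as a {%k1} {z} predicate, and the low 128 bits are extracted afterwards.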
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=359378&r1=359377&r2=359378&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Apr 27 06:35:32 2019
@@ -35326,6 +35326,42 @@ static SDValue combineSelect(SDNode *N,
     return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
   }
 
+  // AVX512 - Extend select with zero to merge with target shuffle.
+  // select(mask, extract_subvector(shuffle(x)), zero) -->
+  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
+  // TODO - support non-target shuffles as well.
+  if (Subtarget.hasAVX512() && CondVT.isVector() &&
+      CondVT.getVectorElementType() == MVT::i1) {
+    auto SelectableOp = [&TLI](SDValue Op) {
+      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
+             isNullConstant(Op.getOperand(1)) &&
+             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
+             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
+    };
+
+    bool SelectableLHS = SelectableOp(LHS);
+    bool SelectableRHS = SelectableOp(RHS);
+    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
+    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
+
+    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
+      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
+                                : RHS.getOperand(0).getValueType();
+      unsigned NumSrcElts = SrcVT.getVectorNumElements();
+      EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
+      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
+                            VT.getSizeInBits());
+      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
+                            VT.getSizeInBits());
+      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
+                         DAG.getUNDEF(SrcCondVT), Cond,
+                         DAG.getIntPtrConstant(0, DL));
+      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
+      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
+    }
+  }
+
   if (SDValue V = combineSelectOfTwoConstants(N, DAG))
     return V;
 
@@ -42444,11 +42480,15 @@ static SDValue combineInsertSubvector(SD
   unsigned IdxVal = N->getConstantOperandVal(2);
   MVT SubVecVT = SubVec.getSimpleValueType();
 
-  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
-    // Inserting zeros into zeros is a nop.
-    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
-      return getZeroVector(OpVT, Subtarget, DAG, dl);
+  if (Vec.isUndef() && SubVec.isUndef())
+    return DAG.getUNDEF(OpVT);
 
+  // Inserting undefs/zeros into zeros/undefs is a zero vector.
+  if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
+      (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
+    return getZeroVector(OpVT, Subtarget, DAG, dl);
+
+  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
     // If we're inserting into a zero vector and then into a larger zero vector,
     // just insert into the larger zero vector directly.
     if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
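The combineInsertSubvector changes support the fold above: widening the select operands creates insert_subvector(undef, x, 0) nodes, and the zero operand is only recognized by the masked lowering if that widened node simplifies back to a plain zero vector. Sketched in the same notation as the code comments (c is a constant index; the undef lanes may legally be refined to zero):

insert_subvector(undef, undef, c) --> undef
insert_subvector(undef, zeros, c) --> zeros
insert_subvector(zeros, undef, c) --> zeros
insert_subvector(zeros, zeros, c) --> zeros (previously the only case handled)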
Modified: llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll?rev=359378&r1=359377&r2=359378&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll Sat Apr 27 06:35:32 2019
@@ -34,11 +34,10 @@ define <8 x i16> @test_masked_16xi16_to_
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [8,6,12,4,7,9,14,8,8,6,12,4,7,9,14,8]
-; CHECK-NEXT: # ymm2 = mem[0,1,0,1]
-; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [8,6,12,4,7,9,14,8]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 8, i32 6, i32 12, i32 4, i32 7, i32 9, i32 14, i32 8>
@@ -65,11 +64,10 @@ define <8 x i16> @test_masked_16xi16_to_
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,12,9,4,14,15,12,14,4,12,9,4,14,15,12,14]
-; CHECK-NEXT: # ymm2 = mem[0,1,0,1]
-; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [4,12,9,4,14,15,12,14]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 4, i32 12, i32 9, i32 4, i32 14, i32 15, i32 12, i32 14>
@@ -96,11 +94,10 @@ define <8 x i16> @test_masked_16xi16_to_
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [4,11,14,10,7,1,6,9,4,11,14,10,7,1,6,9]
-; CHECK-NEXT: # ymm2 = mem[0,1,0,1]
-; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [4,11,14,10,7,1,6,9]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 4, i32 11, i32 14, i32 10, i32 7, i32 1, i32 6, i32 9>
@@ -139,11 +136,10 @@ define <8 x i16> @test_masked_16xi16_to_
define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [14,15,7,13,4,12,8,0,14,15,7,13,4,12,8,0]
-; CHECK-NEXT: # ymm2 = mem[0,1,0,1]
-; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [14,15,7,13,4,12,8,0]
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> <i32 14, i32 15, i32 7, i32 13, i32 4, i32 12, i32 8, i32 0>
@@ -482,11 +478,11 @@ define <8 x i16> @test_masked_32xi16_to_
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [22,27,7,10,13,21,5,14]
-; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2w %ymm0, %ymm2, %ymm3
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm3 = [22,27,7,10,13,21,5,14]
+; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w %ymm0, %ymm3, %ymm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 6, i32 11, i32 23, i32 26, i32 29, i32 5, i32 21, i32 30>
@@ -515,9 +511,9 @@ define <8 x i16> @test_masked_z_32xi16_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [1,21,27,10,8,19,14,5]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 1, i32 21, i32 27, i32 10, i32 8, i32 19, i32 14, i32 5>
@@ -546,9 +542,9 @@ define <8 x i16> @test_masked_z_32xi16_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [15,13,18,16,9,11,26,8]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 15, i32 13, i32 18, i32 16, i32 9, i32 11, i32 26, i32 8>
@@ -589,9 +585,9 @@ define <8 x i16> @test_masked_z_32xi16_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [17,0,23,10,1,8,7,30]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vptestnmw %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w %ymm3, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 17, i32 0, i32 23, i32 10, i32 1, i32 8, i32 7, i32 30>
@@ -782,11 +778,11 @@ define <8 x i16> @test_masked_32xi16_to_
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [16,17,5,1,14,14,13,17]
-; CHECK-NEXT: vmovdqa 32(%rdi), %ymm2
-; CHECK-NEXT: vpermt2w (%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [16,17,5,1,14,14,13,17]
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -816,11 +812,11 @@ define <8 x i16> @test_masked_32xi16_to_
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [7,6,4,6,12,4,27,1]
-; CHECK-NEXT: vmovdqa 32(%rdi), %ymm2
-; CHECK-NEXT: vpermt2w (%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [7,6,4,6,12,4,27,1]
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -850,11 +846,11 @@ define <8 x i16> @test_masked_32xi16_to_
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [6,18,0,4,10,25,22,10]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [6,18,0,4,10,25,22,10]
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -897,11 +893,11 @@ define <8 x i16> @test_masked_32xi16_to_
define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp, <8 x i16> %mask) {
; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [19,1,5,31,9,12,17,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpermt2w 32(%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [19,1,5,31,9,12,17,9]
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vptestnmw %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2w 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -954,9 +950,9 @@ define <4 x i32> @test_masked_z_8xi32_to
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask0:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [4,0,3,2]
-; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 4, i32 0, i32 3, i32 2>
@@ -983,9 +979,9 @@ define <4 x i32> @test_masked_z_8xi32_to
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [3,0,7,3]
-; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 3, i32 0, i32 7, i32 3>
@@ -1052,9 +1048,9 @@ define <4 x i32> @test_masked_z_8xi32_to
; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask3:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [5,3,2,5]
-; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> <i32 5, i32 3, i32 2, i32 5>
@@ -1374,11 +1370,11 @@ define <4 x i32> @test_masked_16xi32_to_
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2
-; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [0,2,4,12,4,6,4,12]
-; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3
+; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,12,4,6,4,12]
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermi2d %ymm3, %ymm0, %ymm2 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 12>
@@ -1407,9 +1403,9 @@ define <4 x i32> @test_masked_z_16xi32_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [5,1,3,4]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 13, i32 9, i32 11, i32 12>
@@ -1438,9 +1434,9 @@ define <4 x i32> @test_masked_z_16xi32_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,13,0]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2d %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2d %ymm3, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 1, i32 1, i32 13, i32 0>
@@ -1481,9 +1477,9 @@ define <4 x i32> @test_masked_z_16xi32_t
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [3,0,0,13]
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2d %ymm3, %ymm2, %ymm0
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2d %ymm3, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 3, i32 0, i32 0, i32 13>
@@ -1669,11 +1665,11 @@ define <4 x i32> @test_masked_16xi32_to_
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [13,0,0,6]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [13,0,0,6]
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -1703,11 +1699,11 @@ define <4 x i32> @test_masked_16xi32_to_
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa 32(%rdi), %ymm1
-; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [15,5,3,2,15,5,7,6]
-; CHECK-NEXT: vpermi2d (%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa 32(%rdi), %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [15,5,3,2,15,5,7,6]
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermi2d (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -1737,11 +1733,11 @@ define <4 x i32> @test_masked_16xi32_to_
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [2,15,6,9]
-; CHECK-NEXT: vmovdqa (%rdi), %ymm2
-; CHECK-NEXT: vpermt2d 32(%rdi), %ymm1, %ymm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [2,15,6,9]
+; CHECK-NEXT: vmovdqa (%rdi), %ymm1
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermt2d 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -1854,9 +1850,9 @@ define <2 x i64> @test_masked_4xi64_to_2
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,0,2,3]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,2,3]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> <i32 2, i32 0>
@@ -1881,9 +1877,9 @@ define <2 x i64> @test_masked_4xi64_to_2
define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,2,3]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> <i32 2, i32 1>
@@ -2248,9 +2244,9 @@ define <2 x i64> @test_masked_8xi64_to_2
define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i64> %mask) {
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpermq {{.*#+}} zmm0 = zmm0[3,0,2,3,7,4,6,7]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,2,3,7,4,6,7]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> <i32 3, i32 0>
@@ -2277,9 +2273,9 @@ define <2 x i64> @test_masked_z_8xi64_to
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask1:
; CHECK: # %bb.0:
; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,2,3]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> <i32 6, i32 5>
@@ -2702,10 +2698,10 @@ define <4 x float> @test_masked_z_8xfloa
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [1,3,5,0]
-; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 0>
@@ -2733,10 +2729,10 @@ define <4 x float> @test_masked_z_8xfloa
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [3,2,7,0]
-; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 3, i32 2, i32 7, i32 0>
@@ -2775,10 +2771,10 @@ define <4 x float> @test_masked_z_8xfloa
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask3:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [3,3,5,2]
-; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0
-; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm1, %k1
+; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> <i32 3, i32 3, i32 5, i32 2>
@@ -3122,12 +3118,12 @@ define <4 x float> @test_masked_16xfloat
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [12,0,1,2]
-; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT: vpermt2ps %ymm0, %ymm2, %ymm3
-; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vcmpeqps %xmm0, %xmm1, %k1
-; CHECK-NEXT: vmovaps %xmm3, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps {{.*#+}} xmm3 = [12,0,1,2]
+; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
+; CHECK-NEXT: vxorps %xmm4, %xmm4, %xmm4
+; CHECK-NEXT: vcmpeqps %xmm4, %xmm1, %k1
+; CHECK-NEXT: vpermt2ps %ymm0, %ymm3, %ymm2 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm2, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> <i32 4, i32 8, i32 9, i32 10>
@@ -3479,12 +3475,12 @@ define <4 x float> @test_masked_16xfloat
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps 32(%rdi), %ymm1
-; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [0,10,6,15,4,14,6,15]
-; CHECK-NEXT: vpermi2ps (%rdi), %ymm1, %ymm2
-; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps 32(%rdi), %ymm2
+; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,10,6,15,4,14,6,15]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
+; CHECK-NEXT: vpermi2ps (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -3515,12 +3511,12 @@ define <4 x float> @test_masked_16xfloat
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps 32(%rdi), %ymm1
-; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [4,14,4,14,4,14,6,7]
-; CHECK-NEXT: vpermi2ps (%rdi), %ymm1, %ymm2
-; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps 32(%rdi), %ymm2
+; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,14,4,14,4,14,6,7]
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
+; CHECK-NEXT: vpermi2ps (%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -3564,12 +3560,12 @@ define <4 x float> @test_masked_16xfloat
define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp, <4 x float> %mask) {
; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovaps {{.*#+}} xmm1 = [3,3,15,9]
-; CHECK-NEXT: vmovaps (%rdi), %ymm2
-; CHECK-NEXT: vpermt2ps 32(%rdi), %ymm1, %ymm2
-; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k1
-; CHECK-NEXT: vmovaps %xmm2, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [3,3,15,9]
+; CHECK-NEXT: vmovaps (%rdi), %ymm1
+; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vcmpeqps %xmm3, %xmm0, %k1
+; CHECK-NEXT: vpermt2ps 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -3607,10 +3603,10 @@ define <2 x double> @test_masked_4xdoubl
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,0,2,3]
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,2,3]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> <i32 2, i32 0>
@@ -3636,10 +3632,10 @@ define <2 x double> @test_masked_4xdoubl
define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask1(<4 x double> %vec, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,3,2,3]
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm1, %k1
-; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3,2,3]
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> <i32 1, i32 3>
@@ -4443,12 +4439,11 @@ define <2 x double> @test_masked_8xdoubl
define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double>* %vp, <2 x double> %mask) {
; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [1,4,1,4,1,4,1,4]
-; CHECK-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm1
+; CHECK-NEXT: vmovapd {{.*#+}} xmm1 = [1,4]
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm0, %k1
-; CHECK-NEXT: vmovapd %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp