[llvm] c8ede5e - [X86][SSE] getFauxShuffleMask - add support for INSERT_VECTOR_ELT(EXTRACT_VECTOR_ELT) shuffle pattern
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 10 08:47:00 PDT 2020
Author: Simon Pilgrim
Date: 2020-03-10T15:42:37Z
New Revision: c8ede5e4858e72a9e7511ef93bf9de04f6e0cad1
URL: https://github.com/llvm/llvm-project/commit/c8ede5e4858e72a9e7511ef93bf9de04f6e0cad1
DIFF: https://github.com/llvm/llvm-project/commit/c8ede5e4858e72a9e7511ef93bf9de04f6e0cad1.diff
LOG: [X86][SSE] getFauxShuffleMask - add support for INSERT_VECTOR_ELT(EXTRACT_VECTOR_ELT) shuffle pattern
We already do this for PINSRB/PINSRW and SCALAR_TO_VECTOR.
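For illustration, a minimal standalone C++ sketch of the mask construction this patch adds. The SM_SentinelUndef/SM_SentinelZero sentinels and the variable names mirror getFauxShuffleMask, but buildFauxMask itself is a hypothetical helper with all the SelectionDAG plumbing and legality checks omitted:

#include <cstdio>
#include <vector>

constexpr int SM_SentinelUndef = -1;
constexpr int SM_SentinelZero = -2;

// Sketch of the mask built for
//   insert_vector_elt(Base, extract_vector_elt(Src, SrcIdx), DstIdx)
// and for scalar_to_vector(extract_vector_elt(Src, SrcIdx)), assuming the
// source and destination vectors have the same total bit width (the patch
// bails out otherwise).
std::vector<int> buildFauxMask(bool IsInsert, unsigned NumSrcElts,
                               unsigned NumElts, unsigned SrcIdx,
                               unsigned DstIdx, unsigned NumZeros) {
  std::vector<int> Mask;
  if (IsInsert) {
    // INSERT_VECTOR_ELT: Ops = {Src, Base}; start from an identity mask of
    // Base, whose lanes live at indices [NumSrcElts, 2*NumSrcElts).
    for (unsigned i = 0; i != NumSrcElts; ++i)
      Mask.push_back(NumSrcElts + i);
  } else {
    // SCALAR_TO_VECTOR: Ops = {Src}; every lane starts undefined.
    Mask.assign(NumSrcElts, SM_SentinelUndef);
  }
  unsigned Scale = NumSrcElts / NumElts;
  Mask[Scale * DstIdx] = SrcIdx;  // The inserted lane reads from Src.
  for (unsigned i = 0; i != NumZeros; ++i)
    Mask[Scale * DstIdx + i + 1] = SM_SentinelZero;  // Implicit zext lanes.
  return Mask;
}

int main() {
  // test4 from avx512-insert-extract.ll below: element 4 of a v8i64 is
  // inserted back at lane 1 of the same vector; equal element sizes, so
  // there are no implicit-zext zero lanes.
  for (int M : buildFauxMask(/*IsInsert=*/true, 8, 8, /*SrcIdx=*/4,
                             /*DstIdx=*/1, /*NumZeros=*/0))
    std::printf("%d ", M);  // Prints: 8 4 10 11 12 13 14 15
  std::printf("\n");
  return 0;
}

That mask is a blend of the base vector with a single lane of the source, which the shuffle combiner can then lower directly (vmovlhps in the test4 diff below) instead of round-tripping through a scalar register.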
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/avx512-insert-extract.ll
llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
llvm/test/CodeGen/X86/insertelement-shuffle.ll
llvm/test/CodeGen/X86/madd.ll
llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8297935d7ebf..a56dd0427127 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2018,6 +2018,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
+ setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::CONCAT_VECTORS);
setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
@@ -7369,37 +7370,63 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
}
return true;
}
- case ISD::SCALAR_TO_VECTOR: {
- // Match against a scalar_to_vector of an extract from a vector,
- // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
- SDValue N0 = N.getOperand(0);
+ case ISD::SCALAR_TO_VECTOR:
+ case ISD::INSERT_VECTOR_ELT: {
+ // Match against an insert_vector_elt/scalar_to_vector of an extract from a
+ // vector, for matching src/dst vector types.
+ // TODO: Merge with PINSRB/PINSRW cases below.
+ // TODO: Handle truncate/zext/shift of scalars.
+ SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
SDValue SrcExtract;
- if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
- N0.getOperand(0).getValueType() == VT) ||
- (N0.getOpcode() == X86ISD::PEXTRW &&
- N0.getOperand(0).getValueType() == MVT::v8i16) ||
- (N0.getOpcode() == X86ISD::PEXTRB &&
- N0.getOperand(0).getValueType() == MVT::v16i8)) {
- SrcExtract = N0;
+ if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Scl.getOperand(0).getValueType() == VT) ||
+ (Scl.getOpcode() == X86ISD::PEXTRW &&
+ Scl.getOperand(0).getValueType() == MVT::v8i16) ||
+ (Scl.getOpcode() == X86ISD::PEXTRB &&
+ Scl.getOperand(0).getValueType() == MVT::v16i8)) {
+ SrcExtract = Scl;
}
if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
return false;
+ if (Opcode != ISD::SCALAR_TO_VECTOR &&
+ !isa<ConstantSDNode>(N.getOperand(2)))
+ return false;
SDValue SrcVec = SrcExtract.getOperand(0);
EVT SrcVT = SrcVec.getValueType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
- unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
+ unsigned NumZeros =
+ std::max<int>((NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1, 0);
+
+ if (SrcVT.getSizeInBits() != VT.getSizeInBits() ||
+ (NumSrcElts % NumElts) != 0)
+ return false;
unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
if (NumSrcElts <= SrcIdx)
return false;
- Ops.push_back(SrcVec);
- Mask.push_back(SrcIdx);
- Mask.append(NumZeros, SM_SentinelZero);
- Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
+ unsigned DstIdx =
+ Opcode == ISD::SCALAR_TO_VECTOR ? 0 : N.getConstantOperandVal(2);
+ if (NumElts <= DstIdx)
+ return false;
+
+ if (Opcode == ISD::SCALAR_TO_VECTOR) {
+ Ops.push_back(SrcVec);
+ Mask.append(NumSrcElts, SM_SentinelUndef);
+ } else {
+ Ops.push_back(SrcVec);
+ Ops.push_back(N.getOperand(0));
+ for (int i = 0; i != (int)NumSrcElts; ++i)
+ Mask.push_back(NumSrcElts + i);
+ }
+
+ int Scale = NumSrcElts / NumElts;
+ Mask[Scale * DstIdx] = SrcIdx;
+ for (int i = 0; i != (int)NumZeros; ++i)
+ Mask[(Scale * DstIdx) + i + 1] = SM_SentinelZero;
return true;
}
case X86ISD::PINSRB:
@@ -40562,19 +40589,24 @@ static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
- (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
+ (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16) ||
+ N->getOpcode() == ISD::INSERT_VECTOR_ELT) &&
"Unexpected vector insertion");
- unsigned NumBitsPerElt = VT.getScalarSizeInBits();
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.SimplifyDemandedBits(SDValue(N, 0),
- APInt::getAllOnesValue(NumBitsPerElt), DCI))
- return SDValue(N, 0);
+ if (N->getOpcode() == X86ISD::PINSRB || N->getOpcode() == X86ISD::PINSRW) {
+ unsigned NumBitsPerElt = VT.getScalarSizeInBits();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLI.SimplifyDemandedBits(SDValue(N, 0),
+ APInt::getAllOnesValue(NumBitsPerElt), DCI))
+ return SDValue(N, 0);
+ }
- // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
- SDValue Op(N, 0);
- if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
- return Res;
+ // Attempt to combine insertion patterns to a shuffle.
+ if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
+ SDValue Op(N, 0);
+ if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
+ return Res;
+ }
return SDValue();
}
@@ -47189,6 +47221,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::VSRAI:
case X86ISD::VSRLI:
return combineVectorShiftImm(N, DAG, DCI, Subtarget);
+ case ISD::INSERT_VECTOR_ELT:
case X86ISD::PINSRB:
case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 5b7c7ba713a8..e541dcece83f 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -48,10 +48,9 @@ define <16 x float> @test3(<16 x float> %x) nounwind {
define <8 x i64> @test4(<8 x i64> %x) nounwind {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0:
-; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1
-; CHECK-NEXT: vmovq %xmm1, %rax
-; CHECK-NEXT: vpinsrq $1, %rax, %xmm0, %xmm1
-; CHECK-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; CHECK-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm1[0]
+; CHECK-NEXT: vinsertf32x4 $0, %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%eee = extractelement <8 x i64> %x, i32 4
%rrr2 = insertelement <8 x i64> %x, i64 %eee, i32 1
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 8bb063a8738d..a3bbe6f584da 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -1730,15 +1730,9 @@ define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp,
define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm0
-; CHECK-NEXT: vmovdqa (%rdi), %xmm1
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm2
-; CHECK-NEXT: vmovd %xmm1, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm0, %xmm0
-; CHECK-NEXT: vextractps $3, %xmm2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; CHECK-NEXT: vpextrd $2, %xmm1, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm1
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm0 = [2,4,3,6]
+; CHECK-NEXT: vpermi2d (%rdi), %xmm1, %xmm0
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>
@@ -1747,17 +1741,11 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) {
define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %vec2, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm2
-; CHECK-NEXT: vmovdqa (%rdi), %xmm3
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm4
-; CHECK-NEXT: vmovd %xmm3, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vextractps $3, %xmm4, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrd $2, %xmm3, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm3 = [2,4,3,6]
+; CHECK-NEXT: vpermi2d (%rdi), %xmm2, %xmm3
; CHECK-NEXT: vptestnmd %xmm1, %xmm1, %k1
-; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1}
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>
@@ -1769,17 +1757,11 @@ define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4
define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %mask) {
; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask3:
; CHECK: # %bb.0:
-; CHECK-NEXT: vpbroadcastd 24(%rdi), %xmm1
-; CHECK-NEXT: vmovdqa (%rdi), %xmm2
-; CHECK-NEXT: vmovaps 16(%rdi), %xmm3
-; CHECK-NEXT: vmovd %xmm2, %eax
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vextractps $3, %xmm3, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrd $2, %xmm2, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vmovdqa 16(%rdi), %xmm2
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm1 = [2,4,3,6]
; CHECK-NEXT: vptestnmd %xmm0, %xmm0, %k1
-; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpermi2d (%rdi), %xmm2, %xmm1 {%k1} {z}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
%shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 6, i32 0, i32 7, i32 2>
diff --git a/llvm/test/CodeGen/X86/insertelement-shuffle.ll b/llvm/test/CodeGen/X86/insertelement-shuffle.ll
index a2b8e2dac86c..1b2a85eba6a2 100644
--- a/llvm/test/CodeGen/X86/insertelement-shuffle.ll
+++ b/llvm/test/CodeGen/X86/insertelement-shuffle.ll
@@ -40,9 +40,8 @@ define <8 x i64> @insert_subvector_512(i32 %x0, i32 %x1, <8 x i64> %v) nounwind
; X64_AVX256: # %bb.0:
; X64_AVX256-NEXT: vmovd %edi, %xmm2
; X64_AVX256-NEXT: vpinsrd $1, %esi, %xmm2, %xmm2
-; X64_AVX256-NEXT: vmovq %xmm2, %rax
-; X64_AVX256-NEXT: vextracti128 $1, %ymm0, %xmm2
-; X64_AVX256-NEXT: vpinsrq $0, %rax, %xmm2, %xmm2
+; X64_AVX256-NEXT: vextracti128 $1, %ymm0, %xmm3
+; X64_AVX256-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3]
; X64_AVX256-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; X64_AVX256-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/madd.ll b/llvm/test/CodeGen/X86/madd.ll
index 11756574217e..43bb8ee004a2 100644
--- a/llvm/test/CodeGen/X86/madd.ll
+++ b/llvm/test/CodeGen/X86/madd.ll
@@ -1891,19 +1891,8 @@ define <4 x i32> @larger_mul(<16 x i16> %A, <16 x i16> %B) {
; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpextrd $2, %xmm0, %eax
-; AVX512-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT: vmovd %xmm2, %eax
-; AVX512-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpextrd $2, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; AVX512-NEXT: vpextrd $1, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX512-NEXT: vpextrd $3, %xmm2, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
-; AVX512-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%a = sext <16 x i16> %A to <16 x i32>
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index 205ea7f66720..0d5566028e8e 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -687,15 +687,12 @@ define <2 x i64> @strict_vector_fptosi_v2f32_to_v2i64(<2 x float> %a) #0 {
;
; AVX512DQ-32-LABEL: strict_vector_fptosi_v2f32_to_v2i64:
; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[1],zero,zero,zero
; AVX512DQ-32-NEXT: vcvttps2qq %ymm1, %zmm1
-; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,zero,zero
+; AVX512DQ-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX512DQ-32-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-32-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vpextrd $1, %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512DQ-32-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512DQ-32-NEXT: vzeroupper
; AVX512DQ-32-NEXT: retl
;
@@ -1021,15 +1018,12 @@ define <2 x i64> @strict_vector_fptoui_v2f32_to_v2i64(<2 x float> %a) #0 {
;
; AVX512DQ-32-LABEL: strict_vector_fptoui_v2f32_to_v2i64:
; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[1],zero,zero,zero
; AVX512DQ-32-NEXT: vcvttps2uqq %ymm1, %zmm1
-; AVX512DQ-32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],zero,zero,zero
+; AVX512DQ-32-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512DQ-32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX512DQ-32-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-32-NEXT: vmovd %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX512DQ-32-NEXT: vpextrd $1, %xmm0, %eax
-; AVX512DQ-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512DQ-32-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512DQ-32-NEXT: vzeroupper
; AVX512DQ-32-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index 5f89e6da9b47..2fabfafe0ccd 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -641,11 +641,11 @@ define <4 x double> @sitofp_v4i64_v4f64(<4 x i64> %x) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $64, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
@@ -876,11 +876,11 @@ define <4 x float> @sitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $48, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
@@ -999,11 +999,11 @@ define <4 x float> @uitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX-32-NEXT: andl $-8, %esp
; AVX-32-NEXT: subl $48, %esp
; AVX-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; AVX-32-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; AVX-32-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[2,3,2,3]
; AVX-32-NEXT: vmovlps %xmm2, {{[0-9]+}}(%esp)
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
index 31b2b7c81082..be1d15ba68b3 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
@@ -273,18 +273,18 @@ define <8 x double> @sitofp_v8i64_v8f64(<8 x i64> %x) #0 {
; NODQ-32-NEXT: subl $128, %esp
; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm0
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
@@ -401,19 +401,19 @@ define <8 x float> @sitofp_v8i64_v8f32(<8 x i64> %x) #0 {
; NODQ-32-NEXT: andl $-8, %esp
; NODQ-32-NEXT: subl $96, %esp
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm1
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
@@ -497,19 +497,19 @@ define <8 x float> @uitofp_v8i64_v8f32(<8 x i64> %x) #0 {
; NODQ-32-NEXT: andl $-8, %esp
; NODQ-32-NEXT: subl $96, %esp
; NODQ-32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf128 $1, %ymm0, %xmm3
; NODQ-32-NEXT: vmovlps %xmm3, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm3[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm3[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; NODQ-32-NEXT: vmovlps %xmm2, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractf32x4 $3, %zmm0, %xmm1
; NODQ-32-NEXT: vmovlps %xmm1, {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: vpermilps {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; NODQ-32-NEXT: vpermilps {{.*#+}} xmm4 = xmm1[2,3,2,3]
; NODQ-32-NEXT: vmovlps %xmm4, {{[0-9]+}}(%esp)
; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
; NODQ-32-NEXT: shrl $31, %eax
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 73f28aa08d51..13ce66312aaa 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2105,14 +2105,12 @@ define <4 x i32> @extract3_insert3_v4i32_0127(<4 x i32> %a0, <4 x i32> %a1) {
;
; SSE41-LABEL: extract3_insert3_v4i32_0127:
; SSE41: # %bb.0:
-; SSE41-NEXT: extractps $3, %xmm1, %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm0
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: extract3_insert3_v4i32_0127:
; AVX: # %bb.0:
-; AVX-NEXT: vextractps $3, %xmm1, %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX-NEXT: retq
%1 = extractelement <4 x i32> %a1, i32 3
%2 = insertelement <4 x i32> %a0, i32 %1, i32 3
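The same construction worked on this final test (a hypothetical standalone snippet mirroring, not calling, the patch's logic):

#include <cstdio>

int main() {
  // extract3_insert3_v4i32_0127 above: element 3 of %a1 inserted at lane 3
  // of %a0; both vectors are v4i32, so Scale == 1 and no zext zero lanes.
  const unsigned NumSrcElts = 4, NumElts = 4, SrcIdx = 3, DstIdx = 3;
  int Mask[NumSrcElts];
  for (unsigned i = 0; i != NumSrcElts; ++i)
    Mask[i] = NumSrcElts + i;                      // Identity mask of %a0.
  Mask[(NumSrcElts / NumElts) * DstIdx] = SrcIdx;  // Lane 3 reads %a1[3].
  for (int M : Mask)
    std::printf("%d ", M);  // Prints: 4 5 6 3
  std::printf("\n");
  return 0;
}

Lanes 0-2 come from %a0 and lane 3 from %a1, which is exactly the single blendps/vblendps the updated checks expect.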