[llvm] cd3b850 - rG9bd97d0363987b582 - Revert "[X86][SSE] Fold HOP(SHUFFLE(X),SHUFFLE(Y)) --> SHUFFLE(HOP(X,Y))"
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 13 07:43:45 PDT 2020
Author: Simon Pilgrim
Date: 2020-08-13T15:21:15+01:00
New Revision: cd3b850a4c8eb7871f2cefb47b1274e734ad92f6
URL: https://github.com/llvm/llvm-project/commit/cd3b850a4c8eb7871f2cefb47b1274e734ad92f6
DIFF: https://github.com/llvm/llvm-project/commit/cd3b850a4c8eb7871f2cefb47b1274e734ad92f6.diff
LOG: rG9bd97d0363987b582 - Revert "[X86][SSE] Fold HOP(SHUFFLE(X),SHUFFLE(Y)) --> SHUFFLE(HOP(X,Y))"
This reverts commit 9bd97d0363987b582e4a92b354b02e86ac068407.
Seeing some codegen issues in internal testing.
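For context, the reverted combine relied on the identity that a 128-bit horizontal op of two unary-shuffled sources equals a v4x32 shuffle of the horizontal op of the original sources, provided each input shuffle can be scaled to a v2x64 mask. Below is a minimal standalone C++ sketch of that identity, not LLVM code: the helpers hadd4, shuffle64 and shuffle32 are invented for illustration, with hadd4 modeling SSE3 haddps semantics on v4f32. It checks the identity for every v2x64 mask pair; note that lanes sourced from the second operand land in the upper half of the HADD result, hence the +2 offset in the post-shuffle mask.

#include <array>
#include <cassert>
#include <cstdio>

using V4 = std::array<float, 4>;

// haddps semantics: { a0+a1, a2+a3, b0+b1, b2+b3 }.
static V4 hadd4(const V4 &A, const V4 &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}

// Unary shuffle of V viewed as two 64-bit halves; M0/M1 are in [0,1].
static V4 shuffle64(const V4 &V, int M0, int M1) {
  return {V[2 * M0], V[2 * M0 + 1], V[2 * M1], V[2 * M1 + 1]};
}

// 32-bit element shuffle of a single source; mask elements are in [0,3].
static V4 shuffle32(const V4 &V, const std::array<int, 4> &M) {
  return {V[M[0]], V[M[1]], V[M[2]], V[M[3]]};
}

int main() {
  const V4 X = {1, 2, 3, 4}, Y = {5, 6, 7, 8};
  for (int m0 = 0; m0 < 2; ++m0)
    for (int m1 = 0; m1 < 2; ++m1)
      for (int n0 = 0; n0 < 2; ++n0)
        for (int n1 = 0; n1 < 2; ++n1) {
          // HOP(SHUFFLE(X),SHUFFLE(Y)) ...
          V4 LHS = hadd4(shuffle64(X, m0, m1), shuffle64(Y, n0, n1));
          // ... must match SHUFFLE(HOP(X,Y)); the Y-derived lanes sit in
          // the upper half of the HADD result, hence the 2 + n offsets.
          V4 RHS = shuffle32(hadd4(X, Y), {m0, m1, 2 + n0, 2 + n1});
          assert(LHS == RHS);
        }
  std::puts("HOP(SHUFFLE(X),SHUFFLE(Y)) == SHUFFLE(HOP(X,Y)) holds");
  return 0;
}

This per-lane reasoning is what allows the DAG combine to replace two input shuffles with a single post shuffle of the horizontal op.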
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/haddsub-shuf.ll
llvm/test/CodeGen/X86/haddsub-undef.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index da5bb9206a3f..b03d7aa24743 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -42108,39 +42108,6 @@ static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
}
}
- // Attempt to fold HOP(SHUFFLE(X),SHUFFLE(Y)) -> SHUFFLE(HOP(X,Y)).
- // TODO: Merge with binary shuffle folds below.
- if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
- int PostShuffle[4] = {0, 1, 2, 3};
-
- // If the op is a unary shuffle that can scale to v2x64,
- // then we can perform this as a v4x32 post shuffle.
- auto AdjustOp = [&](SDValue V, int Offset) {
- auto *SVN = dyn_cast<ShuffleVectorSDNode>(V);
- SmallVector<int, 2> ScaledMask;
- if (!SVN || !SVN->getOperand(1).isUndef() ||
- !scaleShuffleElements(SVN->getMask(), 2, ScaledMask) ||
- !N->isOnlyUserOf(V.getNode()))
- return SDValue();
- PostShuffle[Offset + 0] = ScaledMask[0];
- PostShuffle[Offset + 1] = ScaledMask[1];
- return SVN->getOperand(0);
- };
-
- SDValue Src0 = AdjustOp(N0, 0);
- SDValue Src1 = AdjustOp(N1, 2);
- if (Src0 || Src1) {
- Src0 = Src0 ? Src0 : N0;
- Src1 = Src1 ? Src1 : N1;
- SDLoc DL(N);
- MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
- SDValue Res = DAG.getNode(Opcode, DL, VT, Src0, Src1);
- Res = DAG.getBitcast(ShufVT, Res);
- Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
- return DAG.getBitcast(VT, Res);
- }
- }
-
// Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
// TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
if (VT.is256BitVector() && Subtarget.hasInt256()) {
diff --git a/llvm/test/CodeGen/X86/haddsub-shuf.ll b/llvm/test/CodeGen/X86/haddsub-shuf.ll
index 4f7528b3141a..24a1b1c32a10 100644
--- a/llvm/test/CodeGen/X86/haddsub-shuf.ll
+++ b/llvm/test/CodeGen/X86/haddsub-shuf.ll
@@ -879,45 +879,55 @@ declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>)
define <4 x float> @PR34724_1(<4 x float> %a, <4 x float> %b) {
; SSSE3_SLOW-LABEL: PR34724_1:
; SSSE3_SLOW: # %bb.0:
+; SSSE3_SLOW-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3_SLOW-NEXT: haddps %xmm1, %xmm0
; SSSE3_SLOW-NEXT: movsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; SSSE3_SLOW-NEXT: addps %xmm1, %xmm2
; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
-; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
+; SSSE3_SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0]
; SSSE3_SLOW-NEXT: retq
;
; SSSE3_FAST-LABEL: PR34724_1:
; SSSE3_FAST: # %bb.0:
+; SSSE3_FAST-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSSE3_FAST-NEXT: haddps %xmm1, %xmm0
; SSSE3_FAST-NEXT: haddps %xmm1, %xmm1
; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
-; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSSE3_FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; SSSE3_FAST-NEXT: retq
;
; AVX1_SLOW-LABEL: PR34724_1:
; AVX1_SLOW: # %bb.0:
+; AVX1_SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1_SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX1_SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX1_SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX1_SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX1_SLOW-NEXT: retq
;
; AVX1_FAST-LABEL: PR34724_1:
; AVX1_FAST: # %bb.0:
+; AVX1_FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1_FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX1_FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX1_FAST-NEXT: retq
;
; AVX2_SLOW-LABEL: PR34724_1:
; AVX2_SLOW: # %bb.0:
+; AVX2_SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2_SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2_SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX2_SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX2_SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2_SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX2_SLOW-NEXT: retq
;
; AVX2_FAST-LABEL: PR34724_1:
; AVX2_FAST: # %bb.0:
+; AVX2_FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2_FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX2_FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX2_FAST-NEXT: retq
%t0 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 2, i32 4>
%t1 = shufflevector <4 x float> %a, <4 x float> %b, <2 x i32> <i32 3, i32 5>
diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
index 5a9da3615a3b..a90e8b67a8ca 100644
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -954,32 +954,38 @@ define <4 x float> @PR45747_2(<4 x float> %a, <4 x float> %b) nounwind {
define <4 x float> @PR34724_add_v4f32_u123(<4 x float> %0, <4 x float> %1) {
; SSE-SLOW-LABEL: PR34724_add_v4f32_u123:
; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-SLOW-NEXT: haddps %xmm1, %xmm0
; SSE-SLOW-NEXT: movsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; SSE-SLOW-NEXT: addps %xmm1, %xmm2
; SSE-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[2,0]
-; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
+; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm2[2,0]
; SSE-SLOW-NEXT: retq
;
; SSE-FAST-LABEL: PR34724_add_v4f32_u123:
; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-FAST-NEXT: haddps %xmm1, %xmm0
; SSE-FAST-NEXT: haddps %xmm1, %xmm1
; SSE-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[2,0]
-; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[2,0]
; SSE-FAST-NEXT: retq
;
; AVX-SLOW-LABEL: PR34724_add_v4f32_u123:
; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT: vmovsldup {{.*#+}} xmm2 = xmm1[0,0,2,2]
; AVX-SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: PR34724_add_v4f32_u123:
; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,3]
; AVX-FAST-NEXT: retq
%3 = shufflevector <4 x float> %0, <4 x float> %1, <2 x i32> <i32 2, i32 4>
%4 = shufflevector <4 x float> %0, <4 x float> %1, <2 x i32> <i32 3, i32 5>