[llvm] r363178 - [X86][AVX] Fold concat(vpermilps(x, c), vpermilps(y, c)) -> vpermilps(concat(x, y), c)
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 12 09:38:20 PDT 2019
Author: rksimon
Date: Wed Jun 12 09:38:20 2019
New Revision: 363178
URL: http://llvm.org/viewvc/llvm-project?rev=363178&view=rev
Log:
[X86][AVX] Fold concat(vpermilps(x,c),vpermilps(y,c)) -> vpermilps(concat(x,y),c)
Handles PSHUFD/PSHUFLW/PSHUFHW (AVX2) + VPERMILPS (AVX1).
An extra AVX1 PSHUFD->VPERMILPS combine will be added in a future commit.
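For illustration, a minimal reproducer in the style of the updated tests below (a sketch; the function name is illustrative, mirroring the shuffle_v8f32_32107654_v4f32 case): two 128-bit shuffles with the same immediate feeding a 256-bit concat, which can now lower to a single ymm vpermilps instead of two xmm shuffles plus the insert.

; Sketch of the folded pattern (illustrative name, based on the tests below):
; both 128-bit halves use the same shuffle mask before being concatenated,
; so codegen can perform the vinsertf128 first and one ymm-wide vpermilps.
define <8 x float> @concat_of_vpermilps(<4 x float> %a, <4 x float> %b) {
  %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %2 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %3 = shufflevector <4 x float> %1, <4 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %3
}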
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=363178&r1=363177&r2=363178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Jun 12 09:38:20 2019
@@ -43021,12 +43021,42 @@ static SDValue combineConcatVectorOps(co
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
   }
 
+  bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
+
   // Repeated opcode.
+  // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
+  // but it currently struggles with different vector widths.
   if (llvm::all_of(Ops, [Op0](SDValue Op) {
         return Op.getOpcode() == Op0.getOpcode();
       })) {
     unsigned NumOps = Ops.size();
     switch (Op0.getOpcode()) {
+    case X86ISD::PSHUFHW:
+    case X86ISD::PSHUFLW:
+    case X86ISD::PSHUFD:
+      if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
+          Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
+        SmallVector<SDValue, 2> Src;
+        for (unsigned i = 0; i != NumOps; ++i)
+          Src.push_back(Ops[i].getOperand(0));
+        return DAG.getNode(Op0.getOpcode(), DL, VT,
+                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
+                           Op0.getOperand(1));
+      }
+      break;
+    case X86ISD::VPERMILPI:
+      // TODO - AVX1 must use VPERMILPI + v8f32 for v8i32 shuffles.
+      // TODO - add support for vXf64/vXi64 shuffles.
+      if (!IsSplat && NumOps == 2 && VT == MVT::v8f32 && Subtarget.hasAVX() &&
+          Op0.getOperand(1) == Ops[1].getOperand(1)) {
+        SmallVector<SDValue, 2> Src;
+        for (unsigned i = 0; i != NumOps; ++i)
+          Src.push_back(Ops[i].getOperand(0));
+        return DAG.getNode(Op0.getOpcode(), DL, VT,
+                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
+                           Op0.getOperand(1));
+      }
+      break;
     case X86ISD::PACKUS:
       if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
         SmallVector<SDValue, 2> LHS, RHS;
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll?rev=363178&r1=363177&r2=363178&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v16.ll Wed Jun 12 09:38:20 2019
@@ -4490,9 +4490,9 @@ define <16 x i16> @shuffle_v16i16_03_02_
;
; AVX2OR512VL-LABEL: shuffle_v16i16_03_02_01_00_04_05_06_07_11_10_09_08_12_13_14_15_v8i16:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7]
+; AVX2OR512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2OR512VL-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,1,0,4,5,6,7,11,10,9,8,12,13,14,15]
; AVX2OR512VL-NEXT: retq
%1 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
%2 = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 4, i32 5, i32 6, i32 7>
@@ -4510,9 +4510,9 @@ define <16 x i16> @shuffle_v16i16_00_01_
;
; AVX2OR512VL-LABEL: shuffle_v16i16_00_01_02_04_07_06_05_04_08_09_10_11_15_14_13_12_v8i16:
; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4]
+; AVX2OR512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX2OR512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2OR512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,4,8,9,10,11,15,14,13,12]
; AVX2OR512VL-NEXT: retq
%1 = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4>
%2 = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 7, i32 6, i32 5, i32 4>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=363178&r1=363177&r2=363178&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Wed Jun 12 09:38:20 2019
@@ -1443,12 +1443,26 @@ define <4 x i64> @shuffle_v4i64_1z3z(<4
}
define <4 x i64> @shuffle_v4i64_1032_v2i64(<2 x i64> %a, <2 x i64> %b) {
-; ALL-LABEL: shuffle_v4i64_1032_v2i64:
-; ALL: # %bb.0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT: retq
+; AVX1-LABEL: shuffle_v4i64_1032_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v4i64_1032_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX2-NEXT: retq
+;
+; AVX512VL-LABEL: shuffle_v4i64_1032_v2i64:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX512VL-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
%2 = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
%3 = shufflevector <2 x i64> %1, <2 x i64> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll?rev=363178&r1=363177&r2=363178&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll Wed Jun 12 09:38:20 2019
@@ -1219,9 +1219,9 @@ define <8 x float> @shuffle_v8f32_5555uu
define <8 x float> @shuffle_v8f32_32107654_v4f32(<4 x float> %a, <4 x float> %b) {
; ALL-LABEL: shuffle_v8f32_32107654_v4f32:
; ALL: # %bb.0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
+; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; ALL-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%2 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -2520,12 +2520,19 @@ define <8 x i32> @shuffle_v8i32_uuuuuu7u
}
define <8 x i32> @shuffle_v8i32_32107654_v4i32(<4 x i32> %a, <4 x i32> %b) {
-; ALL-LABEL: shuffle_v8i32_32107654_v4i32:
-; ALL: # %bb.0:
-; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
-; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
-; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT: retq
+; AVX1-LABEL: shuffle_v8i32_32107654_v4i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2OR512VL-LABEL: shuffle_v8i32_32107654_v4i32:
+; AVX2OR512VL: # %bb.0:
+; AVX2OR512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX2OR512VL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2OR512VL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX2OR512VL-NEXT: retq
%1 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%2 = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
%3 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>