[llvm] r215819 - [x86] Teach lots of the new vector shuffle lowering to use UNPCK
Chandler Carruth
chandlerc at gmail.com
Sat Aug 16 02:42:15 PDT 2014
Author: chandlerc
Date: Sat Aug 16 04:42:15 2014
New Revision: 215819
URL: http://llvm.org/viewvc/llvm-project?rev=215819&view=rev
Log:
[x86] Teach lots of the new vector shuffle lowering to use UNPCK
instructions for blend operations at 128 bits. This was a serious hole
in our prior blend lowering.
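For reference, UNPCKLPD/UNPCKHPD (and their integer twins PUNPCKLQDQ/PUNPCKHQDQ) interleave the low or high halves of two registers, so for v2f64/v2i64 they implement exactly the <0,2> and <1,3> blend masks this patch targets. A minimal sketch with SSE2 intrinsics (illustrative only, not part of the patch):

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128d a = _mm_set_pd(1.0, 0.0); /* a = <a0=0.0, a1=1.0> */
    __m128d b = _mm_set_pd(3.0, 2.0); /* b = <b0=2.0, b1=3.0> */

    /* UNPCKLPD produces <a0, b0>, i.e. the <0, 2> shuffle mask. */
    __m128d lo = _mm_unpacklo_pd(a, b);
    /* UNPCKHPD produces <a1, b1>, i.e. the <1, 3> shuffle mask. */
    __m128d hi = _mm_unpackhi_pd(a, b);

    double out[2];
    _mm_storeu_pd(out, lo);
    printf("lo = <%g, %g>\n", out[0], out[1]); /* lo = <0, 2> */
    _mm_storeu_pd(out, hi);
    printf("hi = <%g, %g>\n", out[0], out[1]); /* hi = <1, 3> */
    return 0;
  }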
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=215819&r1=215818&r2=215819&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Aug 16 04:42:15 2014
@@ -7142,6 +7142,12 @@ static SDValue lowerV2F64VectorShuffle(S
assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
assert(Mask[1] >= 2 && "Non-canonicalized blend!");
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(Mask, 0, 2))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
+ if (isShuffleEquivalent(Mask, 1, 3))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
+
unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
DAG.getConstant(SHUFPDMask, MVT::i8));
@@ -7178,6 +7184,12 @@ static SDValue lowerV2I64VectorShuffle(S
getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
}
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(Mask, 0, 2))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
+ if (isShuffleEquivalent(Mask, 1, 3))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
+
// We implement this with SHUFPD which is pretty lame because it will likely
// incur 2 cycles of stall for integer vectors on Nehalem and older chips.
// However, all the alternatives are still more cycles and newer chips don't
@@ -7216,6 +7228,12 @@ static SDValue lowerV4F32VectorShuffle(S
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
getV4X86ShuffleImm8ForMask(Mask, DAG));
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
+ if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
+
if (NumV2Elements == 1) {
int V2Index =
std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
@@ -7304,6 +7322,12 @@ static SDValue lowerV4I32VectorShuffle(S
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(Mask, DAG));
+ // Use dedicated unpack instructions for masks that match their pattern.
+ if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
+ return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
+ if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
+ return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
+
// We implement this with SHUFPS because it can blend from two vectors.
// Because we're going to eventually use SHUFPS, we use SHUFPS even to build
// up the inputs, bypassing domain shift penalties that we would incur if we
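The isShuffleEquivalent calls above treat undef lanes as wildcards, so a mask like <undef, 2> still selects UNPCKL. A sketch of that semantics (a standalone illustration with an assumed name, not the in-tree helper, whose signature differs):

  #include <initializer_list>
  #include <vector>

  // A mask matches a pattern if every element is either undef (-1) or
  // exactly the expected index, so e.g. <-1, 2> still matches <0, 2>.
  static bool shuffleMaskMatches(const std::vector<int> &Mask,
                                 std::initializer_list<int> Expected) {
    if (Mask.size() != Expected.size())
      return false;
    const int *E = Expected.begin();
    for (int M : Mask)
      if (M != -1 && M != *E++)
        return false;
    return true;
  }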
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll?rev=215819&r1=215818&r2=215819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll Sat Aug 16 04:42:15 2014
@@ -95,15 +95,15 @@ define <2 x double> @shuffle_v2f64_33(<2
define <2 x i64> @shuffle_v2i64_02(<2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_02
-; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm0 = xmm0[0],xmm1[0]
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle
}
define <2 x i64> @shuffle_v2i64_02_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_02_copy
-; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm2[0]
-; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm1 = xmm1[0],xmm2[0]
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 2>
ret <2 x i64> %shuffle
@@ -140,31 +140,31 @@ define <2 x i64> @shuffle_v2i64_12_copy(
}
define <2 x i64> @shuffle_v2i64_13(<2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_13
-; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[1],xmm1[1]
+; CHECK-SSE2: punpckhqdq {{.*}} # xmm0 = xmm0[1],xmm1[1]
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle
}
define <2 x i64> @shuffle_v2i64_13_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_13_copy
-; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm2[1]
-; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2: punpckhqdq {{.*}} # xmm1 = xmm1[1],xmm2[1]
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
ret <2 x i64> %shuffle
}
define <2 x i64> @shuffle_v2i64_20(<2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_20
-; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
}
define <2 x i64> @shuffle_v2i64_20_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_20_copy
-; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[0],xmm1[0]
-; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm2 = xmm2[0],xmm1[0]
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
@@ -203,16 +203,16 @@ define <2 x i64> @shuffle_v2i64_30_copy(
}
define <2 x i64> @shuffle_v2i64_31(<2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_31
-; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[1],xmm0[1]
-; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2: punpckhqdq {{.*}} # xmm1 = xmm1[1],xmm0[1]
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
ret <2 x i64> %shuffle
}
define <2 x i64> @shuffle_v2i64_31_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64> %b) {
; CHECK-SSE2-LABEL: @shuffle_v2i64_31_copy
-; CHECK-SSE2: shufpd {{.*}} # xmm2 = xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: movapd %xmm2, %xmm0
+; CHECK-SSE2: punpckhqdq {{.*}} # xmm2 = xmm2[1],xmm1[1]
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
ret <2 x i64> %shuffle
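To reproduce one of these checks locally, feed the file through llc with the flags its RUN line uses (the experimental-lowering flag name is assumed from this era's tests and may differ by revision):

  $ llc < test/CodeGen/X86/vector-shuffle-128-v2.ll -mcpu=x86-64 \
        -x86-experimental-vector-shuffle-lowering

The body of shuffle_v2i64_02 should then be the single punpcklqdq %xmm1, %xmm0 plus retq that the updated CHECK lines expect, rather than the old shufpd sequence.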
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll?rev=215819&r1=215818&r2=215819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll Sat Aug 16 04:42:15 2014
@@ -155,7 +155,7 @@ define <4 x i32> @shuffle_v4i32_4012(<4
}
define <4 x i32> @shuffle_v4i32_0145(<4 x i32> %a, <4 x i32> %b) {
; CHECK-SSE2-LABEL: @shuffle_v4i32_0145
-; CHECK-SSE2: shufpd {{.*}} # xmm0 = xmm0[0],xmm1[0]
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm0 = xmm0[0],xmm1[0]
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
ret <4 x i32> %shuffle
@@ -170,8 +170,8 @@ define <4 x i32> @shuffle_v4i32_0451(<4
}
define <4 x i32> @shuffle_v4i32_4501(<4 x i32> %a, <4 x i32> %b) {
; CHECK-SSE2-LABEL: @shuffle_v4i32_4501
-; CHECK-SSE2: shufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
-; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2: punpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
; CHECK-SSE2-NEXT: retq
%shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
ret <4 x i32> %shuffle
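The shuffle_v4i32_0145 change above is the same interleave widened to 64-bit lanes: mask <0,1,4,5> moves whole qwords, so PUNPCKLQDQ applies. A sketch with SSE2 intrinsics (illustrative only, not part of the patch):

  #include <emmintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128i a = _mm_setr_epi32(0, 1, 2, 3);
    __m128i b = _mm_setr_epi32(4, 5, 6, 7);

    /* PUNPCKLQDQ interleaves the low 64-bit halves: <a0,a1,b0,b1>,
       i.e. the <0,1,4,5> mask from shuffle_v4i32_0145. */
    __m128i lo = _mm_unpacklo_epi64(a, b);

    int out[4];
    _mm_storeu_si128((__m128i *)out, lo);
    printf("<%d,%d,%d,%d>\n", out[0], out[1], out[2], out[3]); /* <0,1,4,5> */
    return 0;
  }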
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=215819&r1=215818&r2=215819&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Sat Aug 16 04:42:15 2014
@@ -17,7 +17,7 @@ define <4 x i64> @shuffle_v4i64_0020(<4
; AVX1-LABEL: @shuffle_v4i64_0020
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vpunpcklqdq {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -108,7 +108,7 @@ define <4 x double> @shuffle_v4f64_0020(
; AVX1-LABEL: @shuffle_v4f64_0020
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vunpcklpd {{.*}} # xmm1 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovlhps {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
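Note how the two 256-bit tests pick domain-matched unpacks for the identical bit pattern: vpunpcklqdq for v4i64 but vunpcklpd for v4f64, sidestepping the domain shift penalties mentioned in the lowering comments. The two instructions compute the same bits (sketch, illustrative only):

  #include <emmintrin.h>
  #include <stdio.h>
  #include <string.h>

  int main(void) {
    __m128d a = _mm_set_pd(1.5, 0.5), b = _mm_set_pd(3.5, 2.5);

    /* Same interleave in two execution domains: UNPCKLPD stays in the
       FP domain, PUNPCKLQDQ in the integer domain; the bits match. */
    __m128d f = _mm_unpacklo_pd(a, b);
    __m128i i = _mm_unpacklo_epi64(_mm_castpd_si128(a), _mm_castpd_si128(b));

    printf("%d\n", memcmp(&f, &i, sizeof f) == 0); /* prints 1 */
    return 0;
  }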