[llvm] r218193 - [x86] Teach the new vector shuffle lowering to use VPERMILPD for
Chandler Carruth
chandlerc at gmail.com
Sat Sep 20 15:09:27 PDT 2014
Author: chandlerc
Date: Sat Sep 20 17:09:27 2014
New Revision: 218193
URL: http://llvm.org/viewvc/llvm-project?rev=218193&view=rev
Log:
[x86] Teach the new vector shuffle lowering to use VPERMILPD for
single-input shuffles with doubles. This allows them to fold memory
operands into the shuffle, etc. This is just the analog to the v4f32
case in my prior commit.
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=218193&r1=218192&r2=218193&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Sat Sep 20 17:09:27 2014
@@ -7657,6 +7657,14 @@ static SDValue lowerV2F64VectorShuffle(S
// Straight shuffle of a single input vector. Simulate this by using the
// single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
+
+  if (Subtarget->hasAVX()) {
+    // If we have AVX, we can use VPERMILPD which will allow folding a load
+    // into the shuffle.
+    return DAG.getNode(X86ISD::VPERMILP, DL, MVT::v2f64, V1,
+                       DAG.getConstant(SHUFPDMask, MVT::i8));
+  }
+
return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
DAG.getConstant(SHUFPDMask, MVT::i8));
}
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll?rev=218193&r1=218192&r2=218193&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll Sat Sep 20 17:09:27 2014
@@ -70,9 +70,13 @@ define <2 x double> @shuffle_v2f64_00(<2
ret <2 x double> %shuffle
}
define <2 x double> @shuffle_v2f64_10(<2 x double> %a, <2 x double> %b) {
-; ALL-LABEL: @shuffle_v2f64_10
-; ALL: shufpd {{.*}} # xmm0 = xmm0[1,0]
-; ALL-NEXT: retq
+; SSE-LABEL: @shuffle_v2f64_10
+; SSE: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: @shuffle_v2f64_10
+; AVX: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
+; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 0>
ret <2 x double> %shuffle
}
@@ -112,7 +116,7 @@ define <2 x double> @shuffle_v2f64_32(<2
; SSE-NEXT: retq
;
; AVX-LABEL: @shuffle_v2f64_32
-; AVX: vshufpd {{.*}} # xmm0 = xmm1[1,0]
+; AVX: vpermilpd {{.*}} # xmm0 = xmm1[1,0]
; AVX-NEXT: retq
%shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 3, i32 2>
ret <2 x double> %shuffle
@@ -520,3 +524,19 @@ define <2 x double> @insert_dup_mem_v2f6
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
}
+
+define <2 x double> @shuffle_mem_v2f64_10(<2 x double>* %ptr) {
+; SSE-LABEL: @shuffle_mem_v2f64_10
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm0
+; SSE-NEXT: shufpd {{.*}} # xmm0 = xmm0[1,0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: @shuffle_mem_v2f64_10
+; AVX: # BB#0:
+; AVX-NEXT: vpermilpd {{.*}} # xmm0 = mem[1,0]
+; AVX-NEXT: retq
+ %a = load <2 x double>* %ptr
+ %shuffle = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ ret <2 x double> %shuffle
+}
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll?rev=218193&r1=218192&r2=218193&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll Sat Sep 20 17:09:27 2014
@@ -129,7 +129,7 @@ define <4 x double> @shuffle_v4f64_0300(
define <4 x double> @shuffle_v4f64_1000(<4 x double> %a, <4 x double> %b) {
; AVX1-LABEL: @shuffle_v4f64_1000
; AVX1: # BB#0:
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm0[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm0[1,0]
; AVX1-NEXT: vunpcklpd {{.*}} # xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
@@ -162,8 +162,8 @@ define <4 x double> @shuffle_v4f64_3210(
; AVX1-LABEL: @shuffle_v4f64_3210
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vshufpd {{.*}} # xmm1 = xmm1[1,0]
-; AVX1-NEXT: vshufpd {{.*}} # xmm0 = xmm0[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm1 = xmm1[1,0]
+; AVX1-NEXT: vpermilpd {{.*}} # xmm0 = xmm0[1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
More information about the llvm-commits
mailing list