[llvm] r263239 - [X86][AVX] Fixed issue where a long chain of shuffles could attempt to combine to a single (illegal) PSHUFB instruction.
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 11 06:39:11 PST 2016
Author: rksimon
Date: Fri Mar 11 08:39:10 2016
New Revision: 263239
URL: http://llvm.org/viewvc/llvm-project?rev=263239&view=rev
Log:
[X86][AVX] Fixed issue where a long chain of shuffles could attempt to combine to a single (illegal) PSHUFB instruction.
It's not enough that we test for SSSE3 - that's only OK for 128-bit vectors - we also need to test for AVX2 / AVX512BW for the 256-bit / 512-bit vector cases.
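
For context, here is a minimal free-standing sketch of the legality check this change encodes. The X86Subtarget feature queries are the ones used in the hunk below; the helper function itself is hypothetical, not something in-tree:

  // Hypothetical helper: a variable byte shuffle (PSHUFB / VPSHUFB) is only
  // available for the vector widths the subtarget's feature set covers.
  static bool canUsePSHUFB(const X86Subtarget &Subtarget, unsigned SizeInBits) {
    switch (SizeInBits) {
    case 128: return Subtarget.hasSSSE3(); // pshufb %xmm
    case 256: return Subtarget.hasAVX2();  // vpshufb %ymm
    case 512: return Subtarget.hasBWI();   // vpshufb %zmm (AVX-512BW)
    default:  return false;
    }
  }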
Modified:
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=263239&r1=263238&r2=263239&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Mar 11 08:39:10 2016
@@ -23945,7 +23945,10 @@ static bool combineX86ShuffleChain(SDVal
// can replace them with a single PSHUFB instruction profitably. Intel's
// manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
// in practice PSHUFB tends to be *very* fast so we're more aggressive.
- if ((Depth >= 3 || HasPSHUFB) && Subtarget.hasSSSE3()) {
+ if ((Depth >= 3 || HasPSHUFB) &&
+ ((VT.is128BitVector() && Subtarget.hasSSSE3()) ||
+ (VT.is256BitVector() && Subtarget.hasAVX2()) ||
+ (VT.is512BitVector() && Subtarget.hasBWI()))) {
SmallVector<SDValue, 16> PSHUFBMask;
int NumBytes = VT.getSizeInBits() / 8;
int Ratio = NumBytes / Mask.size();
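
As a worked illustration of how the NumBytes / Ratio logic above expands an element-level shuffle mask into the byte mask that PSHUFB consumes, here is a standalone sketch (not the in-tree code), assuming the mask has no undef elements:

  #include <cstdint>
  #include <vector>

  // Expand an element shuffle mask into a per-byte mask, mirroring the
  // Ratio = NumBytes / Mask.size() expansion in the snippet above.
  static std::vector<uint8_t> expandToByteMask(const std::vector<int> &Mask,
                                               unsigned VectorSizeInBits) {
    unsigned NumBytes = VectorSizeInBits / 8;
    unsigned Ratio = NumBytes / Mask.size(); // bytes per mask element
    std::vector<uint8_t> ByteMask;
    ByteMask.reserve(NumBytes);
    for (unsigned I = 0; I != NumBytes; ++I)
      ByteMask.push_back(uint8_t(Mask[I / Ratio] * Ratio + I % Ratio));
    return ByteMask;
  }

For example, the four vpermilvar stages in combine_vpermilvar_4f32_4stage below compose to the element mask <2,0,3,1>, which expands to the byte mask [8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7] seen in the expected vpshufb output.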
Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll?rev=263239&r1=263238&r2=263239&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx.ll Fri Mar 11 08:39:10 2016
@@ -19,8 +19,8 @@ declare <8 x i32> @llvm.x86.avx.vperm2f1
declare <8 x float> @llvm.x86.avx.vperm2f128.ps.256(<8 x float>, <8 x float>, i8)
declare <4 x double> @llvm.x86.avx.vperm2f128.pd.256(<4 x double>, <4 x double>, i8)
-define <4 x float> @combine_vpermilvar_4f32(<4 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f32:
+define <4 x float> @combine_vpermilvar_4f32_identity(<4 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f32_identity:
; ALL: # BB#0:
; ALL-NEXT: retq
%1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
@@ -28,8 +28,8 @@ define <4 x float> @combine_vpermilvar_4
ret <4 x float> %2
}
-define <8 x float> @combine_vpermilvar_8f32(<8 x float> %a0) {
-; ALL-LABEL: combine_vpermilvar_8f32:
+define <8 x float> @combine_vpermilvar_8f32_identity(<8 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_8f32_identity:
; ALL: # BB#0:
; ALL-NEXT: retq
%1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 2, i32 3, i32 0, i32 1>)
@@ -37,8 +37,8 @@ define <8 x float> @combine_vpermilvar_8
ret <8 x float> %2
}
-define <2 x double> @combine_vpermilvar_2f64(<2 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_2f64:
+define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) {
+; ALL-LABEL: combine_vpermilvar_2f64_identity:
; ALL: # BB#0:
; ALL-NEXT: retq
%1 = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %a0, <2 x i64> <i64 2, i64 0>)
@@ -46,11 +46,49 @@ define <2 x double> @combine_vpermilvar_
ret <2 x double> %2
}
-define <4 x double> @combine_vpermilvar_4f64(<4 x double> %a0) {
-; ALL-LABEL: combine_vpermilvar_4f64:
+define <4 x double> @combine_vpermilvar_4f64_identity(<4 x double> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f64_identity:
; ALL: # BB#0:
; ALL-NEXT: retq
%1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
%2 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %1, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
ret <4 x double> %2
}
+
+define <4 x float> @combine_vpermilvar_4f32_4stage(<4 x float> %a0) {
+; ALL-LABEL: combine_vpermilvar_4f32_4stage:
+; ALL: # BB#0:
+; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7]
+; ALL-NEXT: retq
+ %1 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+ %2 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %1, <4 x i32> <i32 2, i32 3, i32 0, i32 1>)
+ %3 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>)
+ %4 = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %3, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+ ret <4 x float> %4
+}
+
+define <8 x float> @combine_vpermilvar_8f32_4stage(<8 x float> %a0) {
+; AVX1-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [3,2,1,0,3,2,1,0]
+; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7,24,25,26,27,16,17,18,19,28,29,30,31,20,21,22,23]
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: combine_vpermilvar_8f32_4stage:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,0,1,2,3,12,13,14,15,4,5,6,7,24,25,26,27,16,17,18,19,28,29,30,31,20,21,22,23]
+; AVX512F-NEXT: retq
+ %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
+ %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %1, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>)
+ %3 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %2, <8 x i32> <i32 0, i32 2, i32 1, i32 3, i32 0, i32 2, i32 1, i32 3>)
+ %4 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %3, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 3, i32 2, i32 1, i32 0>)
+ ret <8 x float> %4
+}