[LLVMbugs] [Bug 21780] New: [X86][AVX] Expansion of 256 bit vector loads fails to fold into shuffles
bugzilla-daemon at llvm.org
Mon Dec 8 12:06:17 PST 2014
http://llvm.org/bugs/show_bug.cgi?id=21780
Bug ID: 21780
Summary: [X86][AVX] Expansion of 256 bit vector loads fails to fold into shuffles
Product: new-bugs
Version: trunk
Hardware: PC
OS: Windows NT
Status: NEW
Severity: normal
Priority: P
Component: new bugs
Assignee: unassignedbugs at nondot.org
Reporter: llvm-dev at redking.me.uk
CC: llvmbugs at cs.uiuc.edu
Classification: Unclassified
Follow-up to [Bug #21710] '[X86][AVX] suboptimal expansion of 256 bit vector loads'.
Merging of consecutive loads into a 256-bit ymm register now works well for
simple cases, and the merged loads also fold nicely into bitwise ops (as well as
basic float ops - fadd, fsub, etc.).
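For reference, a case along the following lines does fold as described (a
sketch, not taken from the original report; the function name and the
expectation of a folded vaddpd memory operand are assumptions):

#include <immintrin.h>

/* Sketch of a case that folds as described above: the four consecutive scalar
   loads are merged into a single 256-bit load, which is then expected to fold
   into the vaddpd as a memory operand (exact codegen may vary). */
__m256d vadd_d4_fold(const double* a, const double* b) {
  __m256d va = (__m256d){ a[0], a[1], a[2], a[3] };
  __m256d vb = (__m256d){ b[0], b[1], b[2], b[3] };
  return _mm256_add_pd(va, vb);
}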
The vector shuffle optimizations, however, attempt to selectively load only the
individual lanes that are actually used, and in doing so prevent the loads from
being folded into the shuffle.
e.g.
__m256d vsht_d4(__m256d foo) {
  return __builtin_shufflevector( foo, foo, 0, 0, 2, 2 );
}
define <4 x double> @_Z7vsht_d4Dv4_d(<4 x double> %foo) #1 {
  %1 = shufflevector <4 x double> %foo, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %1
}
vpermilpd $0, %ymm0, %ymm0 # ymm0 = ymm0[0,0,2,2]
retq
__m256d vsht_d4_fold(const double* ptr) {
  __m256d foo = (__m256d){ ptr[0], ptr[1], ptr[2], ptr[3] };
  return __builtin_shufflevector( foo, foo, 0, 0, 2, 2 );
}
define <4 x double> @_Z12vsht_d4_foldPKd(double* nocapture readonly %ptr) #0 {
  %1 = load double* %ptr, align 8, !tbaa !1
  %2 = insertelement <4 x double> undef, double %1, i32 0
  %3 = getelementptr inbounds double* %ptr, i64 2
  %4 = load double* %3, align 8, !tbaa !1
  %5 = insertelement <4 x double> %2, double %4, i32 2
  %6 = shufflevector <4 x double> %5, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %6
}
vmovsd (%rdi), %xmm0
vmovsd 16(%rdi), %xmm1
vinsertf128 $1, %xmm1, %ymm0, %ymm0
vpermilpd $0, %ymm0, %ymm0 # ymm0 = ymm0[0,0,2,2]
retq
Manually editing the IR so that all four consecutive lanes are loaded does permit the fold to occur:
define <4 x double> @_Z12vsht_d4_foldPKd(double* nocapture readonly %ptr) #0 {
  %1 = load double* %ptr, align 8, !tbaa !1
  %2 = insertelement <4 x double> undef, double %1, i32 0
  %3 = getelementptr inbounds double* %ptr, i64 1
  %4 = load double* %3, align 8, !tbaa !1
  %5 = insertelement <4 x double> %2, double %4, i32 1
  %6 = getelementptr inbounds double* %ptr, i64 2
  %7 = load double* %6, align 8, !tbaa !1
  %8 = insertelement <4 x double> %5, double %7, i32 2
  %9 = getelementptr inbounds double* %ptr, i64 3
  %10 = load double* %9, align 8, !tbaa !1
  %11 = insertelement <4 x double> %8, double %10, i32 3
  %12 = shufflevector <4 x double> %11, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %12
}
vpermilpd $0, (%rdi), %ymm0 # ymm0 = mem[0,0,2,2]
retq
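As a hedged aside (not part of the original report): expressing the access as
an explicit 256-bit load via _mm256_loadu_pd keeps a single <4 x double> load
in the IR, which should give the backend the same folding opportunity as the
hand-edited IR above. The function name below is hypothetical.

#include <immintrin.h>

/* Hypothetical workaround sketch: an explicit unaligned 256-bit load avoids
   the per-lane scalar loads, so the shuffle lowering should be able to fold
   the load (behaviour assumed, not verified against trunk). */
__m256d vsht_d4_fold_loadu(const double* ptr) {
  __m256d foo = _mm256_loadu_pd(ptr);
  return __builtin_shufflevector(foo, foo, 0, 0, 2, 2);
}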