[llvm-bugs] [Bug 40574] New: Field ordering still causes extra memcpy
via llvm-bugs
llvm-bugs at lists.llvm.org
Sat Feb 2 11:53:43 PST 2019
https://bugs.llvm.org/show_bug.cgi?id=40574
Bug ID: 40574
Summary: Field ordering still causes extra memcpy
Product: libraries
Version: trunk
Hardware: PC
OS: All
Status: NEW
Severity: enhancement
Priority: P
Component: Scalar Optimizations
Assignee: unassignedbugs at nondot.org
Reporter: jmuizelaar at mozilla.com
CC: llvm-bugs at lists.llvm.org
Even with bug 39844 fixed, we can still get an extra memcpy depending on field
ordering:
#include <stdlib.h>

struct SV {
    size_t capacity;
    size_t disc;
    size_t data[40];

    static SV make() {
        SV ret;
        ret.capacity = 0;
        ret.disc = 0;
        return ret;
    }
};

struct L {
    SV a;
    SV b;
};

template<class T>
struct Allocation {
    T *vec;
    void init(T s) {
        *vec = s;
    }
};

void bar(Allocation<L> a, double g) {
    L s = { SV::make(), SV::make() };
    a.init(s);
}
produces:
bar(Allocation<L>, double):             # @bar(Allocation<L>, double)
        subq    $680, %rsp              # imm = 0x2A8
        xorps   %xmm0, %xmm0
        movaps  %xmm0, (%rsp)
        movaps  %xmm0, 336(%rsp)
        movq    %rsp, %rsi
        movl    $672, %edx              # imm = 0x2A0
        callq   memcpy
        addq    $680, %rsp              # imm = 0x2A8
        retq
but moving capacity to the end gives:
bar(Allocation<L>, double):             # @bar(Allocation<L>, double)
        movq    $0, (%rdi)
        xorps   %xmm0, %xmm0
        movups  %xmm0, 328(%rdi)
        movq    $0, 664(%rdi)
        retq
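
The report does not show the reordered struct, so the following is a minimal
sketch of what "moving capacity to the end" presumably looks like (the name SV2
and the comments are assumptions, not part of the original report):

// Hypothetical reordering: capacity moved from the front of SV to the end.
// The original report does not show this variant; this is an assumption
// about the layout that produced the second assembly listing.
struct SV2 {
    size_t disc;          // offset 0
    size_t data[40];      // offsets 8..327
    size_t capacity;      // offset 328 (moved here from offset 0)

    static SV2 make() {
        SV2 ret;
        ret.capacity = 0;
        ret.disc = 0;
        return ret;
    }
};

With that layout, the initialized fields of the two SV2 members of L sit at
offsets 0, 328, 336 and 664, which matches the direct stores in the second
listing (movq $0, (%rdi); movups %xmm0, 328(%rdi); movq $0, 664(%rdi)): the
optimizer writes straight into *vec instead of materializing a 672-byte
temporary and calling memcpy.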