[llvm] r333429 - [AArch64] Fix PR32384: bump up the number of stores per memset and memcpy
Evandro Menezes via llvm-commits
llvm-commits at lists.llvm.org
Tue May 29 08:58:50 PDT 2018
Author: evandro
Date: Tue May 29 08:58:50 2018
New Revision: 333429
URL: http://llvm.org/viewvc/llvm-project?rev=333429&view=rev
Log:
[AArch64] Fix PR32384: bump up the number of stores per memset and memcpy
As suggested in https://bugs.llvm.org/show_bug.cgi?id=32384#c1, this change
makes the inlining of `memset()` and `memcpy()` more aggressive when
compiling for speed. The tuning remains the same when optimizing for size.
Patch by: Sebastian Pop <s.pop at samsung.com>
Evandro Menezes <e.menezes at samsung.com>
Differential revision: https://reviews.llvm.org/D45098
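
By way of illustration (this example is not part of the commit; the
function name and size are hypothetical): raising the default memset
limit from 8 to 32 stores means a 256-byte zeroing now fits in sixteen
16-byte stores and can be expanded inline when compiling for speed,
where under the previous limit of 8 it stayed a library call. A minimal
C++ sketch:

    #include <cstring>

    // Hypothetical example: 256 bytes = 16 16-byte stores, within the
    // new limit of 32, so the AArch64 backend may now emit inline
    // stores for this at -O2 instead of calling memset(); at -Os the
    // unchanged limit of 8 keeps it a call.
    void clear_block(char *p) {
      std::memset(p, 0, 256);
    }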
Modified:
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
llvm/trunk/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
llvm/trunk/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=333429&r1=333428&r2=333429&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Tue May 29 08:58:50 2018
@@ -579,11 +579,17 @@ AArch64TargetLowering::AArch64TargetLowe
   setTargetDAGCombine(ISD::GlobalAddress);

-  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 8;
+  // In case of strict alignment, avoid an excessive number of byte wide stores.
+  MaxStoresPerMemsetOptSize = 8;
+  MaxStoresPerMemset = Subtarget->requiresStrictAlign()
+                           ? MaxStoresPerMemsetOptSize : 32;
+
   MaxGluedStoresPerMemcpy = 4;

+  MaxStoresPerMemcpyOptSize = 4;
+  MaxStoresPerMemcpy = Subtarget->requiresStrictAlign()
+                           ? MaxStoresPerMemcpyOptSize : 16;

-  MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 4;
-
-  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 4;
+  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmove = 4;

   setStackPointerRegisterToSaveRestore(AArch64::SP);
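
In short, the tuning after this change: 32 stores per memset and 16 per
memcpy when compiling for speed, versus the unchanged 8 and 4 when
optimizing for size; when the subtarget requires strict alignment, the
speed limits fall back to the size-optimized values so that an
unaligned expansion does not decay into a long run of byte-wide stores.
The memmove limit stays at 4 in both modes.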
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=333429&r1=333428&r2=333429&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Tue May 29 08:58:50 2018
@@ -498,12 +498,12 @@ public:
CallingConv::ID CallConv,
bool isVarArg) const override;
private:
- bool isExtFreeImpl(const Instruction *Ext) const override;
-
/// Keep a pointer to the AArch64Subtarget around so that we can
/// make the right decision when generating code for different targets.
const AArch64Subtarget *Subtarget;
+ bool isExtFreeImpl(const Instruction *Ext) const override;
+
void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-memset-to-bzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-memset-to-bzero.ll?rev=333429&r1=333428&r2=333429&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-memset-to-bzero.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-memset-to-bzero.ll Tue May 29 08:58:50 2018
@@ -1,14 +1,14 @@
; RUN: llc %s -mtriple=arm64-apple-darwin -o - | \
-; RUN: FileCheck --check-prefix=CHECK-DARWIN --check-prefix=CHECK %s
+; RUN: FileCheck --check-prefixes=CHECK,CHECK-DARWIN %s
; RUN: llc %s -mtriple=arm64-linux-gnu -o - | \
-; RUN: FileCheck --check-prefix=CHECK-LINUX --check-prefix=CHECK %s
+; RUN: FileCheck --check-prefixes=CHECK,CHECK-LINUX %s
; <rdar://problem/14199482> ARM64: Calls to bzero() replaced with calls to memset()
; CHECK-LABEL: fct1:
; For small size (<= 256), we do not change memset to bzero.
; CHECK-DARWIN: {{b|bl}} _memset
; CHECK-LINUX: {{b|bl}} memset
-define void @fct1(i8* nocapture %ptr) {
+define void @fct1(i8* nocapture %ptr) minsize {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 256, i1 false)
ret void
@@ -20,7 +20,7 @@ declare void @llvm.memset.p0i8.i64(i8* n
; When the size is bigger than 256, change into bzero.
; CHECK-DARWIN: {{b|bl}} _bzero
; CHECK-LINUX: {{b|bl}} memset
-define void @fct2(i8* nocapture %ptr) {
+define void @fct2(i8* nocapture %ptr) minsize {
entry:
tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 257, i1 false)
ret void
@@ -30,7 +30,7 @@ entry:
; For unknown size, change to bzero.
; CHECK-DARWIN: {{b|bl}} _bzero
; CHECK-LINUX: {{b|bl}} memset
-define void @fct3(i8* nocapture %ptr, i32 %unknown) {
+define void @fct3(i8* nocapture %ptr, i32 %unknown) minsize {
entry:
%conv = sext i32 %unknown to i64
tail call void @llvm.memset.p0i8.i64(i8* %ptr, i8 0, i64 %conv, i1 false)
@@ -41,7 +41,7 @@ entry:
; Size <= 256, no change.
; CHECK-DARWIN: {{b|bl}} _memset
; CHECK-LINUX: {{b|bl}} memset
-define void @fct4(i8* %ptr) {
+define void @fct4(i8* %ptr) minsize {
entry:
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
%call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 256, i64 %tmp)
@@ -56,7 +56,7 @@ declare i64 @llvm.objectsize.i64(i8*, i1
; Size > 256, change.
; CHECK-DARWIN: {{b|bl}} _bzero
; CHECK-LINUX: {{b|bl}} memset
-define void @fct5(i8* %ptr) {
+define void @fct5(i8* %ptr) minsize {
entry:
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
%call = tail call i8* @__memset_chk(i8* %ptr, i32 0, i64 257, i64 %tmp)
@@ -67,7 +67,7 @@ entry:
; Size = unknown, change.
; CHECK-DARWIN: {{b|bl}} _bzero
; CHECK-LINUX: {{b|bl}} memset
-define void @fct6(i8* %ptr, i32 %unknown) {
+define void @fct6(i8* %ptr, i32 %unknown) minsize {
entry:
%conv = sext i32 %unknown to i64
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
@@ -82,7 +82,7 @@ entry:
; memset with something that is not a zero, no change.
; CHECK-DARWIN: {{b|bl}} _memset
; CHECK-LINUX: {{b|bl}} memset
-define void @fct7(i8* %ptr) {
+define void @fct7(i8* %ptr) minsize {
entry:
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
%call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 256, i64 %tmp)
@@ -93,7 +93,7 @@ entry:
; memset with something that is not a zero, no change.
; CHECK-DARWIN: {{b|bl}} _memset
; CHECK-LINUX: {{b|bl}} memset
-define void @fct8(i8* %ptr) {
+define void @fct8(i8* %ptr) minsize {
entry:
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
%call = tail call i8* @__memset_chk(i8* %ptr, i32 1, i64 257, i64 %tmp)
@@ -104,7 +104,7 @@ entry:
; memset with something that is not a zero, no change.
; CHECK-DARWIN: {{b|bl}} _memset
; CHECK-LINUX: {{b|bl}} memset
-define void @fct9(i8* %ptr, i32 %unknown) {
+define void @fct9(i8* %ptr, i32 %unknown) minsize {
entry:
%conv = sext i32 %unknown to i64
%tmp = tail call i64 @llvm.objectsize.i64(i8* %ptr, i1 false)
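
Note the `minsize` attribute added to every function above: with the
default memset limit now at 32 stores, these 256- and 257-byte memsets
would be expanded inline at default optimization and the memset/bzero
calls the CHECK lines match would disappear. Marking the functions
`minsize` keeps the 8-store OptSize limit in effect, so the
memset-to-bzero conversion is still what gets tested.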
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll?rev=333429&r1=333428&r2=333429&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misaligned-memcpy-inline.ll Tue May 29 08:58:50 2018
@@ -1,14 +1,42 @@
; RUN: llc -mtriple=arm64-apple-ios -mattr=+strict-align < %s | FileCheck %s
-; Small (16-bytes here) unaligned memcpys should stay memcpy calls if
+; Small (16 bytes here) unaligned memcpy() should be a function call if
; strict-alignment is turned on.
define void @t0(i8* %out, i8* %in) {
; CHECK-LABEL: t0:
-; CHECK: orr w2, wzr, #0x10
-; CHECK-NEXT: bl _memcpy
+; CHECK:       orr w2, wzr, #0x10
+; CHECK-NEXT:  bl _memcpy
entry:
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 16, i1 false)
ret void
}
+; Small (16 bytes here) aligned memcpy() should be inlined even if
+; strict-alignment is turned on.
+define void @t1(i8* align 8 %out, i8* align 8 %in) {
+; CHECK-LABEL: t1:
+; CHECK: ldp x{{[0-9]+}}, x{{[0-9]+}}, [x1]
+; CHECK-NEXT: stp x{{[0-9]+}}, x{{[0-9]+}}, [x0]
+entry:
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %out, i8* align 8 %in, i64 16, i1 false)
+ ret void
+}
+
+; Tiny (4 bytes here) unaligned memcpy() should be inlined with byte sized
+; loads and stores if strict-alignment is turned on.
+define void @t2(i8* %out, i8* %in) {
+; CHECK-LABEL: t2:
+; CHECK: ldrb w{{[0-9]+}}, [x1, #3]
+; CHECK-NEXT: ldrb w{{[0-9]+}}, [x1, #2]
+; CHECK-NEXT: ldrb w{{[0-9]+}}, [x1, #1]
+; CHECK-NEXT: ldrb w{{[0-9]+}}, [x1]
+; CHECK-NEXT: strb w{{[0-9]+}}, [x0, #3]
+; CHECK-NEXT: strb w{{[0-9]+}}, [x0, #2]
+; CHECK-NEXT: strb w{{[0-9]+}}, [x0, #1]
+; CHECK-NEXT: strb w{{[0-9]+}}, [x0]
+entry:
+ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %out, i8* %in, i64 4, i1 false)
+ ret void
+}
+
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i1)
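
Reading t0 and t2 back at the source level (a hedged sketch, not from
the commit): under strict alignment the memcpy budget falls back to the
OptSize value of 4 stores, and with only byte alignment known the
expansion must use byte-wide accesses, so a 16-byte copy (16 stores)
stays a call while a 4-byte copy (4 stores) is inlined. With the 8-byte
alignment of t1, the same 16 bytes fit in a load/store pair.

    #include <cstring>

    // Hypothetical C++ counterparts of t0 and t2, assuming clang
    // targeting AArch64 with strict alignment enabled.
    void copy16(char *out, const char *in) {
      std::memcpy(out, in, 16);  // 16 byte stores > limit of 4: stays a call
    }

    void copy4(char *out, const char *in) {
      std::memcpy(out, in, 4);   // 4 byte stores <= limit of 4: inlined
    }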