[llvm] [X86] For inline memset and memcpy with minsize, use size for alignment, rather than actual alignment (PR #87003)
Rose Silicon via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 6 13:24:01 PDT 2024
https://github.com/RSilicon updated https://github.com/llvm/llvm-project/pull/87003
>From e7e69858cfbfd2a92fe86fe5e2fa8a379f0eaeb7 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 5 Apr 2024 14:16:40 -0400
Subject: [PATCH 1/2] [X86] Pre-commit test (NFC)
---
.../CodeGen/X86/memset-vs-memset-inline.ll | 137 ++++++++++++++++++
1 file changed, 137 insertions(+)
diff --git a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
index b8fdd936b43895..90a3f8e785e906 100644
--- a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
+++ b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
@@ -163,3 +163,140 @@ define void @inlined_set_doesnt_call_external_function(ptr %a, i8 %value) nounwi
tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
ret void
}
+
+define void @memset_inlined_insize(ptr %a) nounwind minsize {
+; CHECK-LABEL: memset_inlined_insize:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
+; CHECK-NEXT: movq %rax, 1016(%rdi)
+; CHECK-NEXT: movq %rax, 1008(%rdi)
+; CHECK-NEXT: movq %rax, 1000(%rdi)
+; CHECK-NEXT: movq %rax, 992(%rdi)
+; CHECK-NEXT: movq %rax, 984(%rdi)
+; CHECK-NEXT: movq %rax, 976(%rdi)
+; CHECK-NEXT: movq %rax, 968(%rdi)
+; CHECK-NEXT: movq %rax, 960(%rdi)
+; CHECK-NEXT: movq %rax, 952(%rdi)
+; CHECK-NEXT: movq %rax, 944(%rdi)
+; CHECK-NEXT: movq %rax, 936(%rdi)
+; CHECK-NEXT: movq %rax, 928(%rdi)
+; CHECK-NEXT: movq %rax, 920(%rdi)
+; CHECK-NEXT: movq %rax, 912(%rdi)
+; CHECK-NEXT: movq %rax, 904(%rdi)
+; CHECK-NEXT: movq %rax, 896(%rdi)
+; CHECK-NEXT: movq %rax, 888(%rdi)
+; CHECK-NEXT: movq %rax, 880(%rdi)
+; CHECK-NEXT: movq %rax, 872(%rdi)
+; CHECK-NEXT: movq %rax, 864(%rdi)
+; CHECK-NEXT: movq %rax, 856(%rdi)
+; CHECK-NEXT: movq %rax, 848(%rdi)
+; CHECK-NEXT: movq %rax, 840(%rdi)
+; CHECK-NEXT: movq %rax, 832(%rdi)
+; CHECK-NEXT: movq %rax, 824(%rdi)
+; CHECK-NEXT: movq %rax, 816(%rdi)
+; CHECK-NEXT: movq %rax, 808(%rdi)
+; CHECK-NEXT: movq %rax, 800(%rdi)
+; CHECK-NEXT: movq %rax, 792(%rdi)
+; CHECK-NEXT: movq %rax, 784(%rdi)
+; CHECK-NEXT: movq %rax, 776(%rdi)
+; CHECK-NEXT: movq %rax, 768(%rdi)
+; CHECK-NEXT: movq %rax, 760(%rdi)
+; CHECK-NEXT: movq %rax, 752(%rdi)
+; CHECK-NEXT: movq %rax, 744(%rdi)
+; CHECK-NEXT: movq %rax, 736(%rdi)
+; CHECK-NEXT: movq %rax, 728(%rdi)
+; CHECK-NEXT: movq %rax, 720(%rdi)
+; CHECK-NEXT: movq %rax, 712(%rdi)
+; CHECK-NEXT: movq %rax, 704(%rdi)
+; CHECK-NEXT: movq %rax, 696(%rdi)
+; CHECK-NEXT: movq %rax, 688(%rdi)
+; CHECK-NEXT: movq %rax, 680(%rdi)
+; CHECK-NEXT: movq %rax, 672(%rdi)
+; CHECK-NEXT: movq %rax, 664(%rdi)
+; CHECK-NEXT: movq %rax, 656(%rdi)
+; CHECK-NEXT: movq %rax, 648(%rdi)
+; CHECK-NEXT: movq %rax, 640(%rdi)
+; CHECK-NEXT: movq %rax, 632(%rdi)
+; CHECK-NEXT: movq %rax, 624(%rdi)
+; CHECK-NEXT: movq %rax, 616(%rdi)
+; CHECK-NEXT: movq %rax, 608(%rdi)
+; CHECK-NEXT: movq %rax, 600(%rdi)
+; CHECK-NEXT: movq %rax, 592(%rdi)
+; CHECK-NEXT: movq %rax, 584(%rdi)
+; CHECK-NEXT: movq %rax, 576(%rdi)
+; CHECK-NEXT: movq %rax, 568(%rdi)
+; CHECK-NEXT: movq %rax, 560(%rdi)
+; CHECK-NEXT: movq %rax, 552(%rdi)
+; CHECK-NEXT: movq %rax, 544(%rdi)
+; CHECK-NEXT: movq %rax, 536(%rdi)
+; CHECK-NEXT: movq %rax, 528(%rdi)
+; CHECK-NEXT: movq %rax, 520(%rdi)
+; CHECK-NEXT: movq %rax, 512(%rdi)
+; CHECK-NEXT: movq %rax, 504(%rdi)
+; CHECK-NEXT: movq %rax, 496(%rdi)
+; CHECK-NEXT: movq %rax, 488(%rdi)
+; CHECK-NEXT: movq %rax, 480(%rdi)
+; CHECK-NEXT: movq %rax, 472(%rdi)
+; CHECK-NEXT: movq %rax, 464(%rdi)
+; CHECK-NEXT: movq %rax, 456(%rdi)
+; CHECK-NEXT: movq %rax, 448(%rdi)
+; CHECK-NEXT: movq %rax, 440(%rdi)
+; CHECK-NEXT: movq %rax, 432(%rdi)
+; CHECK-NEXT: movq %rax, 424(%rdi)
+; CHECK-NEXT: movq %rax, 416(%rdi)
+; CHECK-NEXT: movq %rax, 408(%rdi)
+; CHECK-NEXT: movq %rax, 400(%rdi)
+; CHECK-NEXT: movq %rax, 392(%rdi)
+; CHECK-NEXT: movq %rax, 384(%rdi)
+; CHECK-NEXT: movq %rax, 376(%rdi)
+; CHECK-NEXT: movq %rax, 368(%rdi)
+; CHECK-NEXT: movq %rax, 360(%rdi)
+; CHECK-NEXT: movq %rax, 352(%rdi)
+; CHECK-NEXT: movq %rax, 344(%rdi)
+; CHECK-NEXT: movq %rax, 336(%rdi)
+; CHECK-NEXT: movq %rax, 328(%rdi)
+; CHECK-NEXT: movq %rax, 320(%rdi)
+; CHECK-NEXT: movq %rax, 312(%rdi)
+; CHECK-NEXT: movq %rax, 304(%rdi)
+; CHECK-NEXT: movq %rax, 296(%rdi)
+; CHECK-NEXT: movq %rax, 288(%rdi)
+; CHECK-NEXT: movq %rax, 280(%rdi)
+; CHECK-NEXT: movq %rax, 272(%rdi)
+; CHECK-NEXT: movq %rax, 264(%rdi)
+; CHECK-NEXT: movq %rax, 256(%rdi)
+; CHECK-NEXT: movq %rax, 248(%rdi)
+; CHECK-NEXT: movq %rax, 240(%rdi)
+; CHECK-NEXT: movq %rax, 232(%rdi)
+; CHECK-NEXT: movq %rax, 224(%rdi)
+; CHECK-NEXT: movq %rax, 216(%rdi)
+; CHECK-NEXT: movq %rax, 208(%rdi)
+; CHECK-NEXT: movq %rax, 200(%rdi)
+; CHECK-NEXT: movq %rax, 192(%rdi)
+; CHECK-NEXT: movq %rax, 184(%rdi)
+; CHECK-NEXT: movq %rax, 176(%rdi)
+; CHECK-NEXT: movq %rax, 168(%rdi)
+; CHECK-NEXT: movq %rax, 160(%rdi)
+; CHECK-NEXT: movq %rax, 152(%rdi)
+; CHECK-NEXT: movq %rax, 144(%rdi)
+; CHECK-NEXT: movq %rax, 136(%rdi)
+; CHECK-NEXT: movq %rax, 128(%rdi)
+; CHECK-NEXT: movq %rax, 120(%rdi)
+; CHECK-NEXT: movq %rax, 112(%rdi)
+; CHECK-NEXT: movq %rax, 104(%rdi)
+; CHECK-NEXT: movq %rax, 96(%rdi)
+; CHECK-NEXT: movq %rax, 88(%rdi)
+; CHECK-NEXT: movq %rax, 80(%rdi)
+; CHECK-NEXT: movq %rax, 72(%rdi)
+; CHECK-NEXT: movq %rax, 64(%rdi)
+; CHECK-NEXT: movq %rax, 56(%rdi)
+; CHECK-NEXT: movq %rax, 48(%rdi)
+; CHECK-NEXT: movq %rax, 40(%rdi)
+; CHECK-NEXT: movq %rax, 32(%rdi)
+; CHECK-NEXT: movq %rax, 24(%rdi)
+; CHECK-NEXT: movq %rax, 16(%rdi)
+; CHECK-NEXT: movq %rax, 8(%rdi)
+; CHECK-NEXT: movq %rax, (%rdi)
+; CHECK-NEXT: retq
+ tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 42, i64 1024, i1 0)
+ ret void
+}
>From b27aae940faa7a9f6543ef3d35d83bed7b0c7a9c Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Thu, 28 Mar 2024 16:19:34 -0400
Subject: [PATCH 2/2] [X86] For inline memset with minsize, use stosb
This is the way to do this in the smallest encoding possible.
---
llvm/lib/Target/X86/X86SelectionDAGInfo.cpp | 240 +++++++++-------
.../CodeGen/X86/memcpy-struct-by-value.ll | 8 +-
llvm/test/CodeGen/X86/memcpy.ll | 20 +-
llvm/test/CodeGen/X86/memset-minsize.ll | 74 ++---
.../CodeGen/X86/memset-vs-memset-inline.ll | 267 +-----------------
5 files changed, 187 insertions(+), 422 deletions(-)
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 055466ac660ccc..550b27c114e389 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -28,6 +28,23 @@ static cl::opt<bool>
UseFSRMForMemcpy("x86-use-fsrm-for-memcpy", cl::Hidden, cl::init(false),
cl::desc("Use fast short rep mov in memcpy lowering"));
+/// Returns the best type to use with repmovs/repstos depending on alignment.
+static MVT getOptimalRepType(const X86Subtarget &Subtarget, Align Alignment) {
+ uint64_t Align = Alignment.value();
+ assert((Align != 0) && "Align is normalized");
+ assert(isPowerOf2_64(Align) && "Align is a power of 2");
+ switch (Align) {
+ case 1:
+ return MVT::i8;
+ case 2:
+ return MVT::i16;
+ case 4:
+ return MVT::i32;
+ default:
+ return Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
+ }
+}
+
bool X86SelectionDAGInfo::isBaseRegConflictPossible(
SelectionDAG &DAG, ArrayRef<MCPhysReg> ClobberSet) const {
// We cannot use TRI->hasBasePointer() until *after* we select all basic
@@ -44,92 +61,101 @@ bool X86SelectionDAGInfo::isBaseRegConflictPossible(
return llvm::is_contained(ClobberSet, TRI->getBaseRegister());
}
-SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
- SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
- SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
- MachinePointerInfo DstPtrInfo) const {
- // If to a segment-relative address space, use the default lowering.
- if (DstPtrInfo.getAddrSpace() >= 256)
- return SDValue();
+/// Emit a single REP STOS{B,W,D,Q} instruction for a particular constant size.
+static SDValue emitRepstos(const X86Subtarget &Subtarget, SelectionDAG &DAG,
+ const SDLoc &dl, SDValue Chain, SDValue Dst,
+ SDValue Val, SDValue Size, MVT AVT) {
+ const bool Use64BitRegs = Subtarget.isTarget64BitLP64();
+ unsigned AX = X86::AL;
+ switch (AVT.getSizeInBits()) {
+ case 8:
+ AX = X86::AL;
+ break;
+ case 16:
+ AX = X86::AX;
+ break;
+ case 32:
+ AX = X86::EAX;
+ break;
+ default:
+ AX = X86::RAX;
+ break;
+ }
- // If the base register might conflict with our physical registers, bail out.
- const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
- X86::ECX, X86::EAX, X86::EDI};
- if (isBaseRegConflictPossible(DAG, ClobberSet))
- return SDValue();
+ const unsigned CX = Use64BitRegs ? X86::RCX : X86::ECX;
+ const unsigned DI = Use64BitRegs ? X86::RDI : X86::EDI;
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- const X86Subtarget &Subtarget =
- DAG.getMachineFunction().getSubtarget<X86Subtarget>();
+ SDValue InGlue;
+ Chain = DAG.getCopyToReg(Chain, dl, AX, Val, InGlue);
+ InGlue = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, CX, Size, InGlue);
+ InGlue = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, DI, Dst, InGlue);
+ InGlue = Chain.getValue(1);
+
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = {Chain, DAG.getValueType(AVT), InGlue};
+ return DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
+}
+
+/// Emit a single REP STOSB instruction for a particular constant size.
+static SDValue emitRepstosB(const X86Subtarget &Subtarget, SelectionDAG &DAG,
+ const SDLoc &dl, SDValue Chain, SDValue Dst,
+ SDValue Val, uint64_t Size) {
+ return emitRepstos(Subtarget, DAG, dl, Chain, Dst, Val,
+ DAG.getIntPtrConstant(Size, dl), MVT::i8);
+}
+
+/// Returns a REP STOS instruction, possibly with a few load/stores to implement
+/// a constant size memory set. In some cases where we know REP STOS is
+/// inefficient we return an empty SDValue so the calling code can either
+/// generate a store sequence or call the runtime memset function.
+static SDValue emitConstantSizeRepstos(SelectionDAG &DAG,
+ const X86Subtarget &Subtarget,
+ const SDLoc &dl, SDValue Chain,
+ SDValue Dst, SDValue Val, uint64_t Size,
+ EVT SizeVT, Align Alignment,
+ bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo) {
+ /// In case we optimize for size, we use repstosb even if it's less efficient
+ /// so we can save the loads/stores of the leftover.
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
+ return emitRepstosB(Subtarget, DAG, dl, Chain, Dst, Val, Size);
+
+ if (!AlwaysInline && Size > Subtarget.getMaxInlineSizeThreshold())
+ return SDValue();
// If not DWORD aligned or size is more than the threshold, call the library.
// The libc version is likely to be faster for these cases. It can use the
// address value and run time information about the CPU.
- if (Alignment < Align(4) || !ConstantSize ||
- ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold())
+ if (!AlwaysInline && Alignment < Align(4))
return SDValue();
- uint64_t SizeVal = ConstantSize->getZExtValue();
- SDValue InGlue;
- EVT AVT;
- SDValue Count;
- unsigned BytesLeft = 0;
+ MVT BlockType = MVT::i8;
+ uint64_t BlockCount = Size;
+ uint64_t BytesLeft = 0;
if (auto *ValC = dyn_cast<ConstantSDNode>(Val)) {
- unsigned ValReg;
- uint64_t Val = ValC->getZExtValue() & 255;
-
- // If the value is a constant, then we can potentially use larger sets.
- if (Alignment >= Align(4)) {
- // DWORD aligned
- AVT = MVT::i32;
- ValReg = X86::EAX;
- Val = (Val << 8) | Val;
- Val = (Val << 16) | Val;
- if (Subtarget.is64Bit() && Alignment >= Align(8)) { // QWORD aligned
- AVT = MVT::i64;
- ValReg = X86::RAX;
- Val = (Val << 32) | Val;
- }
- } else if (Alignment == Align(2)) {
- // WORD aligned
- AVT = MVT::i16;
- ValReg = X86::AX;
- Val = (Val << 8) | Val;
- } else {
- // Byte aligned
- AVT = MVT::i8;
- ValReg = X86::AL;
- Count = DAG.getIntPtrConstant(SizeVal, dl);
- }
-
- if (AVT.bitsGT(MVT::i8)) {
- unsigned UBytes = AVT.getSizeInBits() / 8;
- Count = DAG.getIntPtrConstant(SizeVal / UBytes, dl);
- BytesLeft = SizeVal % UBytes;
- }
-
- Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, dl, AVT),
- InGlue);
- InGlue = Chain.getValue(1);
- } else {
- AVT = MVT::i8;
- Count = DAG.getIntPtrConstant(SizeVal, dl);
- Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Val, InGlue);
- InGlue = Chain.getValue(1);
+ BlockType = getOptimalRepType(Subtarget, Alignment);
+ uint64_t Value = ValC->getZExtValue() & 255;
+ const uint64_t BlockBits = BlockType.getSizeInBits();
+
+ if (BlockBits >= 16)
+ Value = (Value << 8) | Value;
+
+ if (BlockBits >= 32)
+ Value = (Value << 16) | Value;
+ if (BlockBits >= 64)
+ Value = (Value << 32) | Value;
+
+ const uint64_t BlockBytes = BlockBits / 8;
+ BlockCount = Size / BlockBytes;
+ BytesLeft = Size % BlockBytes;
+ Val = DAG.getConstant(Value, dl, BlockType);
}
- bool Use64BitRegs = Subtarget.isTarget64BitLP64();
- Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RCX : X86::ECX,
- Count, InGlue);
- InGlue = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Use64BitRegs ? X86::RDI : X86::EDI,
- Dst, InGlue);
- InGlue = Chain.getValue(1);
-
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue Ops[] = {Chain, DAG.getValueType(AVT), InGlue};
- SDValue RepStos = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops);
-
+ SDValue RepStos =
+ emitRepstos(Subtarget, DAG, dl, Chain, Dst, Val,
+ DAG.getIntPtrConstant(BlockCount, dl), BlockType);
/// RepStos can process the whole length.
if (BytesLeft == 0)
return RepStos;
@@ -137,21 +163,45 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
// Handle the last 1 - 7 bytes.
SmallVector<SDValue, 4> Results;
Results.push_back(RepStos);
- unsigned Offset = SizeVal - BytesLeft;
+ unsigned Offset = Size - BytesLeft;
EVT AddrVT = Dst.getValueType();
- EVT SizeVT = Size.getValueType();
Results.push_back(
DAG.getMemset(Chain, dl,
DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
DAG.getConstant(Offset, dl, AddrVT)),
Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
- isVolatile, AlwaysInline,
+ isVolatile, /*AlwaysInline*/ true,
/* CI */ nullptr, DstPtrInfo.getWithOffset(Offset)));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Results);
}
+SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
+ SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Val,
+ SDValue Size, Align Alignment, bool isVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo) const {
+ // If to a segment-relative address space, use the default lowering.
+ if (DstPtrInfo.getAddrSpace() >= 256)
+ return SDValue();
+
+ // If the base register might conflict with our physical registers, bail out.
+ const MCPhysReg ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
+ X86::ECX, X86::EAX, X86::EDI};
+ if (isBaseRegConflictPossible(DAG, ClobberSet))
+ return SDValue();
+
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ if (!ConstantSize)
+ return SDValue();
+
+ const X86Subtarget &Subtarget =
+ DAG.getMachineFunction().getSubtarget<X86Subtarget>();
+ return emitConstantSizeRepstos(
+ DAG, Subtarget, dl, Chain, Dst, Val, ConstantSize->getZExtValue(),
+ Size.getValueType(), Alignment, isVolatile, AlwaysInline, DstPtrInfo);
+}
+
/// Emit a single REP MOVS{B,W,D,Q} instruction.
static SDValue emitRepmovs(const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl, SDValue Chain, SDValue Dst,
@@ -182,24 +232,6 @@ static SDValue emitRepmovsB(const X86Subtarget &Subtarget, SelectionDAG &DAG,
DAG.getIntPtrConstant(Size, dl), MVT::i8);
}
-/// Returns the best type to use with repmovs depending on alignment.
-static MVT getOptimalRepmovsType(const X86Subtarget &Subtarget,
- Align Alignment) {
- uint64_t Align = Alignment.value();
- assert((Align != 0) && "Align is normalized");
- assert(isPowerOf2_64(Align) && "Align is a power of 2");
- switch (Align) {
- case 1:
- return MVT::i8;
- case 2:
- return MVT::i16;
- case 4:
- return MVT::i32;
- default:
- return Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
- }
-}
-
/// Returns a REP MOVS instruction, possibly with a few load/stores to implement
/// a constant size memory copy. In some cases where we know REP MOVS is
/// inefficient we return an empty SDValue so the calling code can either
@@ -209,6 +241,10 @@ static SDValue emitConstantSizeRepmov(
SDValue Chain, SDValue Dst, SDValue Src, uint64_t Size, EVT SizeVT,
Align Alignment, bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) {
+ /// In case we optimize for size, we use repmovsb even if it's less efficient
+ /// so we can save the loads/stores of the leftover.
+ if (DAG.getMachineFunction().getFunction().hasMinSize())
+ return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);
/// TODO: Revisit next line: big copy with ERMSB on march >= haswell are very
/// efficient.
@@ -220,12 +256,13 @@ static SDValue emitConstantSizeRepmov(
return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);
assert(!Subtarget.hasERMSB() && "No efficient RepMovs");
+
/// We assume runtime memcpy will do a better job for unaligned copies when
/// ERMS is not present.
- if (!AlwaysInline && (Alignment.value() & 3) != 0)
+ if (!AlwaysInline && (Alignment < Align(4)))
return SDValue();
- const MVT BlockType = getOptimalRepmovsType(Subtarget, Alignment);
+ const MVT BlockType = getOptimalRepType(Subtarget, Alignment);
const uint64_t BlockBytes = BlockType.getSizeInBits() / 8;
const uint64_t BlockCount = Size / BlockBytes;
const uint64_t BytesLeft = Size % BlockBytes;
@@ -239,11 +276,6 @@ static SDValue emitConstantSizeRepmov(
assert(BytesLeft && "We have leftover at this point");
- /// In case we optimize for size we use repmovsb even if it's less efficient
- /// so we can save the loads/stores of the leftover.
- if (DAG.getMachineFunction().getFunction().hasMinSize())
- return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);
-
// Handle the last 1 - 7 bytes.
SmallVector<SDValue, 4> Results;
Results.push_back(RepMovs);
@@ -282,7 +314,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
if (UseFSRMForMemcpy && Subtarget.hasFSRM())
return emitRepmovs(Subtarget, DAG, dl, Chain, Dst, Src, Size, MVT::i8);
- /// Handle constant sizes,
+ /// Handle constant sizes
if (ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size))
return emitConstantSizeRepmov(DAG, Subtarget, dl, Chain, Dst, Src,
ConstantSize->getZExtValue(),
diff --git a/llvm/test/CodeGen/X86/memcpy-struct-by-value.ll b/llvm/test/CodeGen/X86/memcpy-struct-by-value.ll
index 8bc4098b0f7c60..f6b1e487000976 100644
--- a/llvm/test/CodeGen/X86/memcpy-struct-by-value.ll
+++ b/llvm/test/CodeGen/X86/memcpy-struct-by-value.ll
@@ -78,9 +78,9 @@ define void @test2(ptr nocapture %x) nounwind minsize {
; NOFAST32-NEXT: pushl %esi
; NOFAST32-NEXT: subl $4100, %esp # imm = 0x1004
; NOFAST32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; NOFAST32-NEXT: movl $1024, %ecx # imm = 0x400
+; NOFAST32-NEXT: movl $4096, %ecx # imm = 0x1000
; NOFAST32-NEXT: movl %esp, %edi
-; NOFAST32-NEXT: rep;movsl (%esi), %es:(%edi)
+; NOFAST32-NEXT: rep;movsb (%esi), %es:(%edi)
; NOFAST32-NEXT: calll foo at PLT
; NOFAST32-NEXT: addl $4100, %esp # imm = 0x1004
; NOFAST32-NEXT: popl %esi
@@ -106,9 +106,9 @@ define void @test2(ptr nocapture %x) nounwind minsize {
; NOFAST: # %bb.0:
; NOFAST-NEXT: subq $4104, %rsp # imm = 0x1008
; NOFAST-NEXT: movq %rdi, %rsi
-; NOFAST-NEXT: movl $512, %ecx # imm = 0x200
+; NOFAST-NEXT: movl $4096, %ecx # imm = 0x1000
; NOFAST-NEXT: movq %rsp, %rdi
-; NOFAST-NEXT: rep;movsq (%rsi), %es:(%rdi)
+; NOFAST-NEXT: rep;movsb (%rsi), %es:(%rdi)
; NOFAST-NEXT: callq foo at PLT
; NOFAST-NEXT: addq $4104, %rsp # imm = 0x1008
; NOFAST-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/memcpy.ll b/llvm/test/CodeGen/X86/memcpy.ll
index 6ec9b20163051b..ff026b142ecf3c 100644
--- a/llvm/test/CodeGen/X86/memcpy.ll
+++ b/llvm/test/CodeGen/X86/memcpy.ll
@@ -202,14 +202,16 @@ define void @test3_minsize(ptr nocapture %A, ptr nocapture %B) nounwind minsize
; DARWIN-LABEL: test3_minsize:
; DARWIN: ## %bb.0:
; DARWIN-NEXT: pushq $64
-; DARWIN-NEXT: popq %rdx
-; DARWIN-NEXT: jmp _memcpy ## TAILCALL
+; DARWIN-NEXT: popq %rcx
+; DARWIN-NEXT: rep;movsb (%rsi), %es:(%rdi)
+; DARWIN-NEXT: retq
;
; LINUX-LABEL: test3_minsize:
; LINUX: # %bb.0:
; LINUX-NEXT: pushq $64
-; LINUX-NEXT: popq %rdx
-; LINUX-NEXT: jmp memcpy at PLT # TAILCALL
+; LINUX-NEXT: popq %rcx
+; LINUX-NEXT: rep;movsb (%rsi), %es:(%rdi)
+; LINUX-NEXT: retq
;
; LINUX-SKL-LABEL: test3_minsize:
; LINUX-SKL: # %bb.0:
@@ -249,14 +251,16 @@ define void @test3_minsize_optsize(ptr nocapture %A, ptr nocapture %B) nounwind
; DARWIN-LABEL: test3_minsize_optsize:
; DARWIN: ## %bb.0:
; DARWIN-NEXT: pushq $64
-; DARWIN-NEXT: popq %rdx
-; DARWIN-NEXT: jmp _memcpy ## TAILCALL
+; DARWIN-NEXT: popq %rcx
+; DARWIN-NEXT: rep;movsb (%rsi), %es:(%rdi)
+; DARWIN-NEXT: retq
;
; LINUX-LABEL: test3_minsize_optsize:
; LINUX: # %bb.0:
; LINUX-NEXT: pushq $64
-; LINUX-NEXT: popq %rdx
-; LINUX-NEXT: jmp memcpy at PLT # TAILCALL
+; LINUX-NEXT: popq %rcx
+; LINUX-NEXT: rep;movsb (%rsi), %es:(%rdi)
+; LINUX-NEXT: retq
;
; LINUX-SKL-LABEL: test3_minsize_optsize:
; LINUX-SKL: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/memset-minsize.ll b/llvm/test/CodeGen/X86/memset-minsize.ll
index cc0f2156262bba..23399b07acfbd6 100644
--- a/llvm/test/CodeGen/X86/memset-minsize.ll
+++ b/llvm/test/CodeGen/X86/memset-minsize.ll
@@ -14,10 +14,9 @@ entry:
define void @small_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: small_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq $32
-; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: movl $128, %ecx
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: rep;stosl %eax, %es:(%rdi)
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 128, i1 false)
@@ -27,11 +26,9 @@ entry:
define void @medium_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: medium_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $512, %edx # imm = 0x200
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $512, %ecx # imm = 0x200
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 512, i1 false)
@@ -41,11 +38,9 @@ entry:
define void @large_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: large_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $4096, %edx # imm = 0x1000
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $4096, %ecx # imm = 0x1000
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 4096, i1 false)
@@ -55,11 +50,9 @@ entry:
define void @huge_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: huge_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $8192, %edx # imm = 0x2000
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $8192, %ecx # imm = 0x2000
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 8192, i1 false)
@@ -69,11 +62,9 @@ entry:
define void @odd_length_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: odd_length_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $255, %edx
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $255, %ecx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 255, i1 false)
@@ -83,11 +74,9 @@ entry:
define void @align_1_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: align_1_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $256, %edx # imm = 0x100
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $256, %ecx # imm = 0x100
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 1 %ptr, i8 0, i32 256, i1 false)
@@ -97,11 +86,9 @@ entry:
define void @align_2_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: align_2_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $256, %edx # imm = 0x100
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $256, %ecx # imm = 0x100
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 2 %ptr, i8 0, i32 256, i1 false)
@@ -111,11 +98,9 @@ entry:
define void @align_4_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: align_4_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $256, %edx # imm = 0x100
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $256, %ecx # imm = 0x100
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 4 %ptr, i8 0, i32 256, i1 false)
@@ -125,11 +110,9 @@ entry:
define void @align_8_memset_to_rep_stos(ptr %ptr) minsize nounwind {
; CHECK-LABEL: align_8_memset_to_rep_stos:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: movl $256, %edx # imm = 0x100
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: callq memset at PLT
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: movl $256, %ecx # imm = 0x100
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i32(ptr align 8 %ptr, i8 0, i32 256, i1 false)
@@ -139,10 +122,9 @@ entry:
define void @small_memset_to_rep_stos_64(ptr %ptr) minsize nounwind {
; CHECK-LABEL: small_memset_to_rep_stos_64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: pushq $16
-; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: movl $128, %ecx
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: rep;stosq %rax, %es:(%rdi)
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
entry:
call void @llvm.memset.p0.i64(ptr align 8 %ptr, i8 0, i64 128, i1 false)
diff --git a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
index 90a3f8e785e906..7786d380d7fb4a 100644
--- a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
+++ b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
@@ -28,137 +28,10 @@ define void @regular_memset_calls_external_function(ptr %a, i8 %value) nounwind
define void @inlined_set_doesnt_call_external_function(ptr %a, i8 %value) nounwind {
; CHECK-LABEL: inlined_set_doesnt_call_external_function:
; CHECK: # %bb.0:
-; CHECK-NEXT: movzbl %sil, %ecx
-; CHECK-NEXT: movabsq $72340172838076673, %rax # imm = 0x101010101010101
-; CHECK-NEXT: imulq %rcx, %rax
-; CHECK-NEXT: movq %rax, 1016(%rdi)
-; CHECK-NEXT: movq %rax, 1008(%rdi)
-; CHECK-NEXT: movq %rax, 1000(%rdi)
-; CHECK-NEXT: movq %rax, 992(%rdi)
-; CHECK-NEXT: movq %rax, 984(%rdi)
-; CHECK-NEXT: movq %rax, 976(%rdi)
-; CHECK-NEXT: movq %rax, 968(%rdi)
-; CHECK-NEXT: movq %rax, 960(%rdi)
-; CHECK-NEXT: movq %rax, 952(%rdi)
-; CHECK-NEXT: movq %rax, 944(%rdi)
-; CHECK-NEXT: movq %rax, 936(%rdi)
-; CHECK-NEXT: movq %rax, 928(%rdi)
-; CHECK-NEXT: movq %rax, 920(%rdi)
-; CHECK-NEXT: movq %rax, 912(%rdi)
-; CHECK-NEXT: movq %rax, 904(%rdi)
-; CHECK-NEXT: movq %rax, 896(%rdi)
-; CHECK-NEXT: movq %rax, 888(%rdi)
-; CHECK-NEXT: movq %rax, 880(%rdi)
-; CHECK-NEXT: movq %rax, 872(%rdi)
-; CHECK-NEXT: movq %rax, 864(%rdi)
-; CHECK-NEXT: movq %rax, 856(%rdi)
-; CHECK-NEXT: movq %rax, 848(%rdi)
-; CHECK-NEXT: movq %rax, 840(%rdi)
-; CHECK-NEXT: movq %rax, 832(%rdi)
-; CHECK-NEXT: movq %rax, 824(%rdi)
-; CHECK-NEXT: movq %rax, 816(%rdi)
-; CHECK-NEXT: movq %rax, 808(%rdi)
-; CHECK-NEXT: movq %rax, 800(%rdi)
-; CHECK-NEXT: movq %rax, 792(%rdi)
-; CHECK-NEXT: movq %rax, 784(%rdi)
-; CHECK-NEXT: movq %rax, 776(%rdi)
-; CHECK-NEXT: movq %rax, 768(%rdi)
-; CHECK-NEXT: movq %rax, 760(%rdi)
-; CHECK-NEXT: movq %rax, 752(%rdi)
-; CHECK-NEXT: movq %rax, 744(%rdi)
-; CHECK-NEXT: movq %rax, 736(%rdi)
-; CHECK-NEXT: movq %rax, 728(%rdi)
-; CHECK-NEXT: movq %rax, 720(%rdi)
-; CHECK-NEXT: movq %rax, 712(%rdi)
-; CHECK-NEXT: movq %rax, 704(%rdi)
-; CHECK-NEXT: movq %rax, 696(%rdi)
-; CHECK-NEXT: movq %rax, 688(%rdi)
-; CHECK-NEXT: movq %rax, 680(%rdi)
-; CHECK-NEXT: movq %rax, 672(%rdi)
-; CHECK-NEXT: movq %rax, 664(%rdi)
-; CHECK-NEXT: movq %rax, 656(%rdi)
-; CHECK-NEXT: movq %rax, 648(%rdi)
-; CHECK-NEXT: movq %rax, 640(%rdi)
-; CHECK-NEXT: movq %rax, 632(%rdi)
-; CHECK-NEXT: movq %rax, 624(%rdi)
-; CHECK-NEXT: movq %rax, 616(%rdi)
-; CHECK-NEXT: movq %rax, 608(%rdi)
-; CHECK-NEXT: movq %rax, 600(%rdi)
-; CHECK-NEXT: movq %rax, 592(%rdi)
-; CHECK-NEXT: movq %rax, 584(%rdi)
-; CHECK-NEXT: movq %rax, 576(%rdi)
-; CHECK-NEXT: movq %rax, 568(%rdi)
-; CHECK-NEXT: movq %rax, 560(%rdi)
-; CHECK-NEXT: movq %rax, 552(%rdi)
-; CHECK-NEXT: movq %rax, 544(%rdi)
-; CHECK-NEXT: movq %rax, 536(%rdi)
-; CHECK-NEXT: movq %rax, 528(%rdi)
-; CHECK-NEXT: movq %rax, 520(%rdi)
-; CHECK-NEXT: movq %rax, 512(%rdi)
-; CHECK-NEXT: movq %rax, 504(%rdi)
-; CHECK-NEXT: movq %rax, 496(%rdi)
-; CHECK-NEXT: movq %rax, 488(%rdi)
-; CHECK-NEXT: movq %rax, 480(%rdi)
-; CHECK-NEXT: movq %rax, 472(%rdi)
-; CHECK-NEXT: movq %rax, 464(%rdi)
-; CHECK-NEXT: movq %rax, 456(%rdi)
-; CHECK-NEXT: movq %rax, 448(%rdi)
-; CHECK-NEXT: movq %rax, 440(%rdi)
-; CHECK-NEXT: movq %rax, 432(%rdi)
-; CHECK-NEXT: movq %rax, 424(%rdi)
-; CHECK-NEXT: movq %rax, 416(%rdi)
-; CHECK-NEXT: movq %rax, 408(%rdi)
-; CHECK-NEXT: movq %rax, 400(%rdi)
-; CHECK-NEXT: movq %rax, 392(%rdi)
-; CHECK-NEXT: movq %rax, 384(%rdi)
-; CHECK-NEXT: movq %rax, 376(%rdi)
-; CHECK-NEXT: movq %rax, 368(%rdi)
-; CHECK-NEXT: movq %rax, 360(%rdi)
-; CHECK-NEXT: movq %rax, 352(%rdi)
-; CHECK-NEXT: movq %rax, 344(%rdi)
-; CHECK-NEXT: movq %rax, 336(%rdi)
-; CHECK-NEXT: movq %rax, 328(%rdi)
-; CHECK-NEXT: movq %rax, 320(%rdi)
-; CHECK-NEXT: movq %rax, 312(%rdi)
-; CHECK-NEXT: movq %rax, 304(%rdi)
-; CHECK-NEXT: movq %rax, 296(%rdi)
-; CHECK-NEXT: movq %rax, 288(%rdi)
-; CHECK-NEXT: movq %rax, 280(%rdi)
-; CHECK-NEXT: movq %rax, 272(%rdi)
-; CHECK-NEXT: movq %rax, 264(%rdi)
-; CHECK-NEXT: movq %rax, 256(%rdi)
-; CHECK-NEXT: movq %rax, 248(%rdi)
-; CHECK-NEXT: movq %rax, 240(%rdi)
-; CHECK-NEXT: movq %rax, 232(%rdi)
-; CHECK-NEXT: movq %rax, 224(%rdi)
-; CHECK-NEXT: movq %rax, 216(%rdi)
-; CHECK-NEXT: movq %rax, 208(%rdi)
-; CHECK-NEXT: movq %rax, 200(%rdi)
-; CHECK-NEXT: movq %rax, 192(%rdi)
-; CHECK-NEXT: movq %rax, 184(%rdi)
-; CHECK-NEXT: movq %rax, 176(%rdi)
-; CHECK-NEXT: movq %rax, 168(%rdi)
-; CHECK-NEXT: movq %rax, 160(%rdi)
-; CHECK-NEXT: movq %rax, 152(%rdi)
-; CHECK-NEXT: movq %rax, 144(%rdi)
-; CHECK-NEXT: movq %rax, 136(%rdi)
-; CHECK-NEXT: movq %rax, 128(%rdi)
-; CHECK-NEXT: movq %rax, 120(%rdi)
-; CHECK-NEXT: movq %rax, 112(%rdi)
-; CHECK-NEXT: movq %rax, 104(%rdi)
-; CHECK-NEXT: movq %rax, 96(%rdi)
-; CHECK-NEXT: movq %rax, 88(%rdi)
-; CHECK-NEXT: movq %rax, 80(%rdi)
-; CHECK-NEXT: movq %rax, 72(%rdi)
-; CHECK-NEXT: movq %rax, 64(%rdi)
-; CHECK-NEXT: movq %rax, 56(%rdi)
-; CHECK-NEXT: movq %rax, 48(%rdi)
-; CHECK-NEXT: movq %rax, 40(%rdi)
-; CHECK-NEXT: movq %rax, 32(%rdi)
-; CHECK-NEXT: movq %rax, 24(%rdi)
-; CHECK-NEXT: movq %rax, 16(%rdi)
-; CHECK-NEXT: movq %rax, 8(%rdi)
-; CHECK-NEXT: movq %rax, (%rdi)
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: movl $1024, %ecx # imm = 0x400
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
ret void
@@ -167,135 +40,9 @@ define void @inlined_set_doesnt_call_external_function(ptr %a, i8 %value) nounwi
define void @memset_inlined_insize(ptr %a) nounwind minsize {
; CHECK-LABEL: memset_inlined_insize:
; CHECK: # %bb.0:
-; CHECK-NEXT: movabsq $3038287259199220266, %rax # imm = 0x2A2A2A2A2A2A2A2A
-; CHECK-NEXT: movq %rax, 1016(%rdi)
-; CHECK-NEXT: movq %rax, 1008(%rdi)
-; CHECK-NEXT: movq %rax, 1000(%rdi)
-; CHECK-NEXT: movq %rax, 992(%rdi)
-; CHECK-NEXT: movq %rax, 984(%rdi)
-; CHECK-NEXT: movq %rax, 976(%rdi)
-; CHECK-NEXT: movq %rax, 968(%rdi)
-; CHECK-NEXT: movq %rax, 960(%rdi)
-; CHECK-NEXT: movq %rax, 952(%rdi)
-; CHECK-NEXT: movq %rax, 944(%rdi)
-; CHECK-NEXT: movq %rax, 936(%rdi)
-; CHECK-NEXT: movq %rax, 928(%rdi)
-; CHECK-NEXT: movq %rax, 920(%rdi)
-; CHECK-NEXT: movq %rax, 912(%rdi)
-; CHECK-NEXT: movq %rax, 904(%rdi)
-; CHECK-NEXT: movq %rax, 896(%rdi)
-; CHECK-NEXT: movq %rax, 888(%rdi)
-; CHECK-NEXT: movq %rax, 880(%rdi)
-; CHECK-NEXT: movq %rax, 872(%rdi)
-; CHECK-NEXT: movq %rax, 864(%rdi)
-; CHECK-NEXT: movq %rax, 856(%rdi)
-; CHECK-NEXT: movq %rax, 848(%rdi)
-; CHECK-NEXT: movq %rax, 840(%rdi)
-; CHECK-NEXT: movq %rax, 832(%rdi)
-; CHECK-NEXT: movq %rax, 824(%rdi)
-; CHECK-NEXT: movq %rax, 816(%rdi)
-; CHECK-NEXT: movq %rax, 808(%rdi)
-; CHECK-NEXT: movq %rax, 800(%rdi)
-; CHECK-NEXT: movq %rax, 792(%rdi)
-; CHECK-NEXT: movq %rax, 784(%rdi)
-; CHECK-NEXT: movq %rax, 776(%rdi)
-; CHECK-NEXT: movq %rax, 768(%rdi)
-; CHECK-NEXT: movq %rax, 760(%rdi)
-; CHECK-NEXT: movq %rax, 752(%rdi)
-; CHECK-NEXT: movq %rax, 744(%rdi)
-; CHECK-NEXT: movq %rax, 736(%rdi)
-; CHECK-NEXT: movq %rax, 728(%rdi)
-; CHECK-NEXT: movq %rax, 720(%rdi)
-; CHECK-NEXT: movq %rax, 712(%rdi)
-; CHECK-NEXT: movq %rax, 704(%rdi)
-; CHECK-NEXT: movq %rax, 696(%rdi)
-; CHECK-NEXT: movq %rax, 688(%rdi)
-; CHECK-NEXT: movq %rax, 680(%rdi)
-; CHECK-NEXT: movq %rax, 672(%rdi)
-; CHECK-NEXT: movq %rax, 664(%rdi)
-; CHECK-NEXT: movq %rax, 656(%rdi)
-; CHECK-NEXT: movq %rax, 648(%rdi)
-; CHECK-NEXT: movq %rax, 640(%rdi)
-; CHECK-NEXT: movq %rax, 632(%rdi)
-; CHECK-NEXT: movq %rax, 624(%rdi)
-; CHECK-NEXT: movq %rax, 616(%rdi)
-; CHECK-NEXT: movq %rax, 608(%rdi)
-; CHECK-NEXT: movq %rax, 600(%rdi)
-; CHECK-NEXT: movq %rax, 592(%rdi)
-; CHECK-NEXT: movq %rax, 584(%rdi)
-; CHECK-NEXT: movq %rax, 576(%rdi)
-; CHECK-NEXT: movq %rax, 568(%rdi)
-; CHECK-NEXT: movq %rax, 560(%rdi)
-; CHECK-NEXT: movq %rax, 552(%rdi)
-; CHECK-NEXT: movq %rax, 544(%rdi)
-; CHECK-NEXT: movq %rax, 536(%rdi)
-; CHECK-NEXT: movq %rax, 528(%rdi)
-; CHECK-NEXT: movq %rax, 520(%rdi)
-; CHECK-NEXT: movq %rax, 512(%rdi)
-; CHECK-NEXT: movq %rax, 504(%rdi)
-; CHECK-NEXT: movq %rax, 496(%rdi)
-; CHECK-NEXT: movq %rax, 488(%rdi)
-; CHECK-NEXT: movq %rax, 480(%rdi)
-; CHECK-NEXT: movq %rax, 472(%rdi)
-; CHECK-NEXT: movq %rax, 464(%rdi)
-; CHECK-NEXT: movq %rax, 456(%rdi)
-; CHECK-NEXT: movq %rax, 448(%rdi)
-; CHECK-NEXT: movq %rax, 440(%rdi)
-; CHECK-NEXT: movq %rax, 432(%rdi)
-; CHECK-NEXT: movq %rax, 424(%rdi)
-; CHECK-NEXT: movq %rax, 416(%rdi)
-; CHECK-NEXT: movq %rax, 408(%rdi)
-; CHECK-NEXT: movq %rax, 400(%rdi)
-; CHECK-NEXT: movq %rax, 392(%rdi)
-; CHECK-NEXT: movq %rax, 384(%rdi)
-; CHECK-NEXT: movq %rax, 376(%rdi)
-; CHECK-NEXT: movq %rax, 368(%rdi)
-; CHECK-NEXT: movq %rax, 360(%rdi)
-; CHECK-NEXT: movq %rax, 352(%rdi)
-; CHECK-NEXT: movq %rax, 344(%rdi)
-; CHECK-NEXT: movq %rax, 336(%rdi)
-; CHECK-NEXT: movq %rax, 328(%rdi)
-; CHECK-NEXT: movq %rax, 320(%rdi)
-; CHECK-NEXT: movq %rax, 312(%rdi)
-; CHECK-NEXT: movq %rax, 304(%rdi)
-; CHECK-NEXT: movq %rax, 296(%rdi)
-; CHECK-NEXT: movq %rax, 288(%rdi)
-; CHECK-NEXT: movq %rax, 280(%rdi)
-; CHECK-NEXT: movq %rax, 272(%rdi)
-; CHECK-NEXT: movq %rax, 264(%rdi)
-; CHECK-NEXT: movq %rax, 256(%rdi)
-; CHECK-NEXT: movq %rax, 248(%rdi)
-; CHECK-NEXT: movq %rax, 240(%rdi)
-; CHECK-NEXT: movq %rax, 232(%rdi)
-; CHECK-NEXT: movq %rax, 224(%rdi)
-; CHECK-NEXT: movq %rax, 216(%rdi)
-; CHECK-NEXT: movq %rax, 208(%rdi)
-; CHECK-NEXT: movq %rax, 200(%rdi)
-; CHECK-NEXT: movq %rax, 192(%rdi)
-; CHECK-NEXT: movq %rax, 184(%rdi)
-; CHECK-NEXT: movq %rax, 176(%rdi)
-; CHECK-NEXT: movq %rax, 168(%rdi)
-; CHECK-NEXT: movq %rax, 160(%rdi)
-; CHECK-NEXT: movq %rax, 152(%rdi)
-; CHECK-NEXT: movq %rax, 144(%rdi)
-; CHECK-NEXT: movq %rax, 136(%rdi)
-; CHECK-NEXT: movq %rax, 128(%rdi)
-; CHECK-NEXT: movq %rax, 120(%rdi)
-; CHECK-NEXT: movq %rax, 112(%rdi)
-; CHECK-NEXT: movq %rax, 104(%rdi)
-; CHECK-NEXT: movq %rax, 96(%rdi)
-; CHECK-NEXT: movq %rax, 88(%rdi)
-; CHECK-NEXT: movq %rax, 80(%rdi)
-; CHECK-NEXT: movq %rax, 72(%rdi)
-; CHECK-NEXT: movq %rax, 64(%rdi)
-; CHECK-NEXT: movq %rax, 56(%rdi)
-; CHECK-NEXT: movq %rax, 48(%rdi)
-; CHECK-NEXT: movq %rax, 40(%rdi)
-; CHECK-NEXT: movq %rax, 32(%rdi)
-; CHECK-NEXT: movq %rax, 24(%rdi)
-; CHECK-NEXT: movq %rax, 16(%rdi)
-; CHECK-NEXT: movq %rax, 8(%rdi)
-; CHECK-NEXT: movq %rax, (%rdi)
+; CHECK-NEXT: movl $1024, %ecx # imm = 0x400
+; CHECK-NEXT: movb $42, %al
+; CHECK-NEXT: rep;stosb %al, %es:(%rdi)
; CHECK-NEXT: retq
tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 42, i64 1024, i1 0)
ret void
More information about the llvm-commits
mailing list