[llvm] [X86] Use AlwaysInline to determine whether to emit code or bail when inlining is determined to be unprofitable (PR #87003)

via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 5 08:47:25 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/87003

From e88fbe65d8434af4b18a8220cdfe52cf7b67e457 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Thu, 28 Mar 2024 16:19:34 -0400
Subject: [PATCH] [X86] Use AlwaysInline to determine whether to emit code or
 bail when inlining is determined to be unprofitable

Assume inlining is profitable by the time we reach the getMemset code, since
it ought to have been judged profitable by then. This mirrors how getMemcpy
already works.
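
For reference, the alignment derivation this patch performs is roughly the
following (a minimal standalone C++ sketch, not the patch code itself; the
helper name deriveStoreAlignment is illustrative only):

    #include <cstdint>

    // Pick the widest store unit that evenly divides the constant size.
    // Note the parentheses around the masks: in C++ '==' binds tighter
    // than '&', so 'Size & 7 == 0' would parse as 'Size & (7 == 0)'.
    static unsigned deriveStoreAlignment(uint64_t Size, bool Is64Bit) {
      if ((Size & 7) == 0 && Is64Bit)
        return 8; // use 8-byte stores
      if ((Size & 3) == 0)
        return 4; // use 4-byte stores
      if ((Size & 1) == 0)
        return 2; // use 2-byte stores
      return 1;   // fall back to byte stores
    }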
---
 llvm/lib/Target/X86/X86SelectionDAGInfo.cpp   |  73 ++++++++--
 .../CodeGen/X86/memset-vs-memset-inline.ll    | 135 +-----------------
 2 files changed, 66 insertions(+), 142 deletions(-)

diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index 7c630a2b0da080..1419569f442d93 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -63,14 +63,41 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
   if (DstPtrInfo.getAddrSpace() >= 256)
     return SDValue();
 
+  if (!ConstantSize)
+    return SDValue();
+
   // If not DWORD aligned or size is more than the threshold, call the library.
   // The libc version is likely to be faster for these cases. It can use the
   // address value and run time information about the CPU.
-  if (Alignment < Align(4) || !ConstantSize ||
-      ConstantSize->getZExtValue() > Subtarget.getMaxInlineSizeThreshold()) 
-    return SDValue();
+  //
+  // If we MUST inline, the pointer alignment no longer disqualifies us.
+  // x86 supports unaligned accesses, and as long as the size is a
+  // multiple of the store width we can still use fewer, wider
+  // instructions.
+  //
+  // Unaligned accesses are no slower than writing the same range byte
+  // by byte; they are only slower than aligned accesses. The real
+  // penalty comes from a value that straddles a cache line, forcing the
+  // CPU to touch two lines per access, but that is unavoidable here: we
+  // have to write the range one way or another, so we take the penalty
+  // either way.
+  //
+  // Instead, base the alignment on the widest type that divides the
+  // constant size.
 
   uint64_t SizeVal = ConstantSize->getZExtValue();
+  if (AlwaysInline || DAG.getMachineFunction().getFunction().hasMinSize()) {
+    if ((SizeVal & 7) == 0 && Subtarget.is64Bit())
+      Alignment = Align(8);
+    else if ((SizeVal & 3) == 0)
+      Alignment = Align(4);
+    else if ((SizeVal & 1) == 0)
+      Alignment = Align(2);
+    else
+      Alignment = Align(1);
+  } else if (Alignment < Align(4) ||
+             SizeVal > Subtarget.getMaxInlineSizeThreshold())
+    return SDValue();
+
   SDValue InGlue;
   EVT AVT;
   SDValue Count;
@@ -142,7 +169,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemset(
                       DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
                                   DAG.getConstant(Offset, dl, AddrVT)),
                       Val, DAG.getConstant(BytesLeft, dl, SizeVT), Alignment,
-                      isVolatile, AlwaysInline,
+                      isVolatile, /* AlwaysInline */ true,
                       /* isTailCall */ false, DstPtrInfo.getWithOffset(Offset));
   }
 
@@ -208,19 +235,43 @@ static SDValue emitConstantSizeRepmov(
     Align Alignment, bool isVolatile, bool AlwaysInline,
     MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) {
 
-  /// TODO: Revisit next line: big copy with ERMSB on march >= haswell are very
-  /// efficient.
-  if (!AlwaysInline && Size > Subtarget.getMaxInlineSizeThreshold())
-    return SDValue();
+  // If we MUST inline, the pointer alignment no longer disqualifies us.
+  // x86 supports unaligned accesses, and as long as the size is a
+  // multiple of the copy width we can still use fewer, wider
+  // instructions.
+  //
+  // Unaligned accesses are no slower than copying the same range byte
+  // by byte; they are only slower than aligned accesses. The real
+  // penalty comes from a value that straddles a cache line, forcing the
+  // CPU to touch two lines per access, but that is unavoidable here: we
+  // have to copy the range one way or another, so we take the penalty
+  // either way.
+  //
+  // Instead, base the alignment on the widest type that divides the
+  // constant size.
+
+  /// In case we optimize for size, we use repmovsb even if it's less
+  /// efficient, so we can save the loads/stores of the leftover bytes.
 
   /// If we have enhanced repmovs we use it.
   if (Subtarget.hasERMSB())
     return emitRepmovsB(Subtarget, DAG, dl, Chain, Dst, Src, Size);
 
   assert(!Subtarget.hasERMSB() && "No efficient RepMovs");
-  /// We assume runtime memcpy will do a better job for unaligned copies when
-  /// ERMS is not present.
-  if (!AlwaysInline && (Alignment.value() & 3) != 0)
+
+  if (AlwaysInline || DAG.getMachineFunction().getFunction().hasMinSize()) {
+    if ((Size & 7) == 0 && Subtarget.is64Bit())
+      Alignment = Align(8);
+    else if ((Size & 3) == 0)
+      Alignment = Align(4);
+    else if ((Size & 1) == 0)
+      Alignment = Align(2);
+    else
+      Alignment = Align(1);
+
+    /// We assume the runtime memcpy will do a better job for unaligned
+    /// copies when ERMS is not present.
+  } else if (Alignment < Align(4) ||
+             Size > Subtarget.getMaxInlineSizeThreshold())
     return SDValue();
 
   const MVT BlockType = getOptimalRepmovsType(Subtarget, Alignment);
diff --git a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
index b8fdd936b43895..16022c6cbb3934 100644
--- a/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
+++ b/llvm/test/CodeGen/X86/memset-vs-memset-inline.ll
@@ -28,137 +28,10 @@ define void @regular_memset_calls_external_function(ptr %a, i8 %value) nounwind
 define void @inlined_set_doesnt_call_external_function(ptr %a, i8 %value) nounwind {
 ; CHECK-LABEL: inlined_set_doesnt_call_external_function:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    movzbl %sil, %ecx
-; CHECK-NEXT:    movabsq $72340172838076673, %rax # imm = 0x101010101010101
-; CHECK-NEXT:    imulq %rcx, %rax
-; CHECK-NEXT:    movq %rax, 1016(%rdi)
-; CHECK-NEXT:    movq %rax, 1008(%rdi)
-; CHECK-NEXT:    movq %rax, 1000(%rdi)
-; CHECK-NEXT:    movq %rax, 992(%rdi)
-; CHECK-NEXT:    movq %rax, 984(%rdi)
-; CHECK-NEXT:    movq %rax, 976(%rdi)
-; CHECK-NEXT:    movq %rax, 968(%rdi)
-; CHECK-NEXT:    movq %rax, 960(%rdi)
-; CHECK-NEXT:    movq %rax, 952(%rdi)
-; CHECK-NEXT:    movq %rax, 944(%rdi)
-; CHECK-NEXT:    movq %rax, 936(%rdi)
-; CHECK-NEXT:    movq %rax, 928(%rdi)
-; CHECK-NEXT:    movq %rax, 920(%rdi)
-; CHECK-NEXT:    movq %rax, 912(%rdi)
-; CHECK-NEXT:    movq %rax, 904(%rdi)
-; CHECK-NEXT:    movq %rax, 896(%rdi)
-; CHECK-NEXT:    movq %rax, 888(%rdi)
-; CHECK-NEXT:    movq %rax, 880(%rdi)
-; CHECK-NEXT:    movq %rax, 872(%rdi)
-; CHECK-NEXT:    movq %rax, 864(%rdi)
-; CHECK-NEXT:    movq %rax, 856(%rdi)
-; CHECK-NEXT:    movq %rax, 848(%rdi)
-; CHECK-NEXT:    movq %rax, 840(%rdi)
-; CHECK-NEXT:    movq %rax, 832(%rdi)
-; CHECK-NEXT:    movq %rax, 824(%rdi)
-; CHECK-NEXT:    movq %rax, 816(%rdi)
-; CHECK-NEXT:    movq %rax, 808(%rdi)
-; CHECK-NEXT:    movq %rax, 800(%rdi)
-; CHECK-NEXT:    movq %rax, 792(%rdi)
-; CHECK-NEXT:    movq %rax, 784(%rdi)
-; CHECK-NEXT:    movq %rax, 776(%rdi)
-; CHECK-NEXT:    movq %rax, 768(%rdi)
-; CHECK-NEXT:    movq %rax, 760(%rdi)
-; CHECK-NEXT:    movq %rax, 752(%rdi)
-; CHECK-NEXT:    movq %rax, 744(%rdi)
-; CHECK-NEXT:    movq %rax, 736(%rdi)
-; CHECK-NEXT:    movq %rax, 728(%rdi)
-; CHECK-NEXT:    movq %rax, 720(%rdi)
-; CHECK-NEXT:    movq %rax, 712(%rdi)
-; CHECK-NEXT:    movq %rax, 704(%rdi)
-; CHECK-NEXT:    movq %rax, 696(%rdi)
-; CHECK-NEXT:    movq %rax, 688(%rdi)
-; CHECK-NEXT:    movq %rax, 680(%rdi)
-; CHECK-NEXT:    movq %rax, 672(%rdi)
-; CHECK-NEXT:    movq %rax, 664(%rdi)
-; CHECK-NEXT:    movq %rax, 656(%rdi)
-; CHECK-NEXT:    movq %rax, 648(%rdi)
-; CHECK-NEXT:    movq %rax, 640(%rdi)
-; CHECK-NEXT:    movq %rax, 632(%rdi)
-; CHECK-NEXT:    movq %rax, 624(%rdi)
-; CHECK-NEXT:    movq %rax, 616(%rdi)
-; CHECK-NEXT:    movq %rax, 608(%rdi)
-; CHECK-NEXT:    movq %rax, 600(%rdi)
-; CHECK-NEXT:    movq %rax, 592(%rdi)
-; CHECK-NEXT:    movq %rax, 584(%rdi)
-; CHECK-NEXT:    movq %rax, 576(%rdi)
-; CHECK-NEXT:    movq %rax, 568(%rdi)
-; CHECK-NEXT:    movq %rax, 560(%rdi)
-; CHECK-NEXT:    movq %rax, 552(%rdi)
-; CHECK-NEXT:    movq %rax, 544(%rdi)
-; CHECK-NEXT:    movq %rax, 536(%rdi)
-; CHECK-NEXT:    movq %rax, 528(%rdi)
-; CHECK-NEXT:    movq %rax, 520(%rdi)
-; CHECK-NEXT:    movq %rax, 512(%rdi)
-; CHECK-NEXT:    movq %rax, 504(%rdi)
-; CHECK-NEXT:    movq %rax, 496(%rdi)
-; CHECK-NEXT:    movq %rax, 488(%rdi)
-; CHECK-NEXT:    movq %rax, 480(%rdi)
-; CHECK-NEXT:    movq %rax, 472(%rdi)
-; CHECK-NEXT:    movq %rax, 464(%rdi)
-; CHECK-NEXT:    movq %rax, 456(%rdi)
-; CHECK-NEXT:    movq %rax, 448(%rdi)
-; CHECK-NEXT:    movq %rax, 440(%rdi)
-; CHECK-NEXT:    movq %rax, 432(%rdi)
-; CHECK-NEXT:    movq %rax, 424(%rdi)
-; CHECK-NEXT:    movq %rax, 416(%rdi)
-; CHECK-NEXT:    movq %rax, 408(%rdi)
-; CHECK-NEXT:    movq %rax, 400(%rdi)
-; CHECK-NEXT:    movq %rax, 392(%rdi)
-; CHECK-NEXT:    movq %rax, 384(%rdi)
-; CHECK-NEXT:    movq %rax, 376(%rdi)
-; CHECK-NEXT:    movq %rax, 368(%rdi)
-; CHECK-NEXT:    movq %rax, 360(%rdi)
-; CHECK-NEXT:    movq %rax, 352(%rdi)
-; CHECK-NEXT:    movq %rax, 344(%rdi)
-; CHECK-NEXT:    movq %rax, 336(%rdi)
-; CHECK-NEXT:    movq %rax, 328(%rdi)
-; CHECK-NEXT:    movq %rax, 320(%rdi)
-; CHECK-NEXT:    movq %rax, 312(%rdi)
-; CHECK-NEXT:    movq %rax, 304(%rdi)
-; CHECK-NEXT:    movq %rax, 296(%rdi)
-; CHECK-NEXT:    movq %rax, 288(%rdi)
-; CHECK-NEXT:    movq %rax, 280(%rdi)
-; CHECK-NEXT:    movq %rax, 272(%rdi)
-; CHECK-NEXT:    movq %rax, 264(%rdi)
-; CHECK-NEXT:    movq %rax, 256(%rdi)
-; CHECK-NEXT:    movq %rax, 248(%rdi)
-; CHECK-NEXT:    movq %rax, 240(%rdi)
-; CHECK-NEXT:    movq %rax, 232(%rdi)
-; CHECK-NEXT:    movq %rax, 224(%rdi)
-; CHECK-NEXT:    movq %rax, 216(%rdi)
-; CHECK-NEXT:    movq %rax, 208(%rdi)
-; CHECK-NEXT:    movq %rax, 200(%rdi)
-; CHECK-NEXT:    movq %rax, 192(%rdi)
-; CHECK-NEXT:    movq %rax, 184(%rdi)
-; CHECK-NEXT:    movq %rax, 176(%rdi)
-; CHECK-NEXT:    movq %rax, 168(%rdi)
-; CHECK-NEXT:    movq %rax, 160(%rdi)
-; CHECK-NEXT:    movq %rax, 152(%rdi)
-; CHECK-NEXT:    movq %rax, 144(%rdi)
-; CHECK-NEXT:    movq %rax, 136(%rdi)
-; CHECK-NEXT:    movq %rax, 128(%rdi)
-; CHECK-NEXT:    movq %rax, 120(%rdi)
-; CHECK-NEXT:    movq %rax, 112(%rdi)
-; CHECK-NEXT:    movq %rax, 104(%rdi)
-; CHECK-NEXT:    movq %rax, 96(%rdi)
-; CHECK-NEXT:    movq %rax, 88(%rdi)
-; CHECK-NEXT:    movq %rax, 80(%rdi)
-; CHECK-NEXT:    movq %rax, 72(%rdi)
-; CHECK-NEXT:    movq %rax, 64(%rdi)
-; CHECK-NEXT:    movq %rax, 56(%rdi)
-; CHECK-NEXT:    movq %rax, 48(%rdi)
-; CHECK-NEXT:    movq %rax, 40(%rdi)
-; CHECK-NEXT:    movq %rax, 32(%rdi)
-; CHECK-NEXT:    movq %rax, 24(%rdi)
-; CHECK-NEXT:    movq %rax, 16(%rdi)
-; CHECK-NEXT:    movq %rax, 8(%rdi)
-; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movl $1024, %ecx # imm = 0x400
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
+; CHECK-NEXT:    rep;stosb %al, %es:(%rdi)
 ; CHECK-NEXT:    retq
   tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
   ret void
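
For anyone reproducing the new lowering locally, feeding llc an IR file with
a call like the one in the updated test (the exact RUN line lives in the test
file; this snippet is just an illustration) should now produce the rep;stosb
sequence shown above instead of the long run of unrolled movq stores:

    declare void @llvm.memset.inline.p0.i64(ptr, i8, i64, i1)

    define void @set(ptr %a, i8 %value) nounwind {
      tail call void @llvm.memset.inline.p0.i64(ptr %a, i8 %value, i64 1024, i1 0)
      ret void
    }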


