[llvm] r342675 - [inline Cost] Don't mark functions accessing varargs as non-inlinable

Sameer AbuAsal via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 20 11:39:34 PDT 2018


Author: sabuasal
Date: Thu Sep 20 11:39:34 2018
New Revision: 342675

URL: http://llvm.org/viewvc/llvm-project?rev=342675&view=rev
Log:
[inline Cost] Don't mark functions accessing varargs as non-inlinable

Summary:
rL323619 marked functions that call va_end as not viable for
inlining. This patch reverses that part of the change: va_end does not
need access to the variadic argument list saved on the stack; only
va_start does.
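
A minimal C-level sketch of the pattern this enables (the function
names are illustrative, not from the patch; the IR test added below is
the authoritative version):

  #include <stdarg.h>

  /* Mirrors callee_with_vaend in the IR test: the body only calls
     va_end on a va_list it received, so it never touches the caller's
     variadic save area and is now eligible for inlining. */
  static void end_list(va_list ap) {
    va_end(ap);
  }

  /* Mirrors caller_with_vastart: va_start captures this function's own
     register/stack save area, which is why functions containing
     va_start remain rejected by isInlineViable(). */
  void start_and_end(int n, ...) {
    va_list ap;
    va_start(ap, n);
    end_list(ap);
  }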

Reviewers: efriedma, fhahn

Reviewed By: fhahn

Subscribers: eraman, haicheng, llvm-commits

Differential Revision: https://reviews.llvm.org/D52067

Modified:
    llvm/trunk/lib/Analysis/InlineCost.cpp
    llvm/trunk/test/Transforms/Inline/inline-varargs.ll

Modified: llvm/trunk/lib/Analysis/InlineCost.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/InlineCost.cpp?rev=342675&r1=342674&r2=342675&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/InlineCost.cpp (original)
+++ llvm/trunk/lib/Analysis/InlineCost.cpp Thu Sep 20 11:39:34 2018
@@ -137,7 +137,7 @@ class CallAnalyzer : public InstVisitor<
   bool HasReturn;
   bool HasIndirectBr;
   bool HasUninlineableIntrinsic;
-  bool UsesVarArgs;
+  bool InitsVargArgs;
 
   /// Number of bytes allocated statically by the callee.
   uint64_t AllocatedSize;
@@ -283,7 +283,7 @@ public:
         IsCallerRecursive(false), IsRecursiveCall(false),
         ExposesReturnsTwice(false), HasDynamicAlloca(false),
         ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
-        HasUninlineableIntrinsic(false), UsesVarArgs(false), AllocatedSize(0),
+        HasUninlineableIntrinsic(false), InitsVargArgs(false), AllocatedSize(0),
         NumInstructions(0), NumVectorInstructions(0), VectorBonus(0),
         SingleBBBonus(0), EnableLoadElimination(true), LoadEliminationCost(0),
         NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
@@ -1239,8 +1239,7 @@ bool CallAnalyzer::visitCallSite(CallSit
         HasUninlineableIntrinsic = true;
         return false;
       case Intrinsic::vastart:
-      case Intrinsic::vaend:
-        UsesVarArgs = true;
+        InitsVargArgs = true;
         return false;
       }
     }
@@ -1587,7 +1586,7 @@ CallAnalyzer::analyzeBlock(BasicBlock *B
       IR = "indirect branch";
     else if (HasUninlineableIntrinsic)
       IR = "uninlinable intrinsic";
-    else if (UsesVarArgs)
+    else if (InitsVargArgs)
       IR = "varargs";
     if (!IR) {
       if (ORE)
@@ -2079,9 +2078,8 @@ bool llvm::isInlineViable(Function &F) {
         // Disallow inlining functions that call @llvm.localescape. Doing this
         // correctly would require major changes to the inliner.
         case llvm::Intrinsic::localescape:
-        // Disallow inlining of functions that access VarArgs.
+        // Disallow inlining of functions that initialize VarArgs with va_start.
         case llvm::Intrinsic::vastart:
-        case llvm::Intrinsic::vaend:
           return false;
         }
     }

Modified: llvm/trunk/test/Transforms/Inline/inline-varargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/Inline/inline-varargs.ll?rev=342675&r1=342674&r2=342675&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/Inline/inline-varargs.ll (original)
+++ llvm/trunk/test/Transforms/Inline/inline-varargs.ll Thu Sep 20 11:39:34 2018
@@ -84,6 +84,35 @@ define i32 @call_vargs() {
 ; CHECK: %res1 = call i32 (...) @varg_accessed(i32 10)
 ; CHECK-NEXT: %res2 = call i32 (...) @varg_accessed_alwaysinline(i32 15)
 
+define void @caller_with_vastart(i8* noalias nocapture readnone %args, ...) {
+entry:
+  %ap = alloca i8*, align 4
+  %ap.ptr = bitcast i8** %ap to i8*
+  %ap2 = alloca i8*, align 4
+  %ap2.ptr = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* nonnull %ap.ptr)
+  call fastcc void @callee_with_vaend(i8* nonnull %ap.ptr)
+  call void @llvm.va_start(i8* nonnull %ap2.ptr)
+  call fastcc void @callee_with_vaend_alwaysinline(i8* nonnull %ap2.ptr)
+  ret void
+}
+
+define internal fastcc void @callee_with_vaend_alwaysinline(i8* %a) alwaysinline {
+entry:
+  tail call void @llvm.va_end(i8* %a)
+  ret void
+}
+
+define internal fastcc void @callee_with_vaend(i8* %a) {
+entry:
+  tail call void @llvm.va_end(i8* %a)
+  ret void
+}
+
+; CHECK-LABEL: @caller_with_vastart
+; CHECK-NOT: @callee_with_vaend
+; CHECK-NOT: @callee_with_vaend_alwaysinline
+
 declare void @llvm.va_start(i8*)
 declare void @llvm.va_end(i8*)
 
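To exercise the updated test locally (assuming a built opt and FileCheck
are on PATH; the RUN lines in the test file itself are authoritative),
something like the following should work from the llvm source tree:

  opt -inline -S < test/Transforms/Inline/inline-varargs.ll \
    | FileCheck test/Transforms/Inline/inline-varargs.ll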



