[llvm] 6b4b660 - [X86] Move -x86-use-vzeroupper command line flag into runOnMachineFunction for the pass itself rather than the pass pipeline construction

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 13 14:43:46 PDT 2020


Author: Craig Topper
Date: 2020-06-13T14:42:41-07:00
New Revision: 6b4b660174799898dd0030c60ee77fc633dc02bb

URL: https://github.com/llvm/llvm-project/commit/6b4b660174799898dd0030c60ee77fc633dc02bb
DIFF: https://github.com/llvm/llvm-project/commit/6b4b660174799898dd0030c60ee77fc633dc02bb.diff

LOG: [X86] Move -x86-use-vzeroupper command line flag into runOnMachineFunction for the pass itself rather than the pass pipeline construction

This pass has no dependencies on other passes, so conditionally
including it in the pipeline doesn't accomplish much. Just move the
check into the pass itself to keep it isolated.

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86TargetMachine.cpp
    llvm/lib/Target/X86/X86VZeroUpper.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index c159728ed1dd..7e00b30915a0 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -313,14 +313,6 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
   return I.get();
 }
 
-//===----------------------------------------------------------------------===//
-// Command line options for x86
-//===----------------------------------------------------------------------===//
-static cl::opt<bool>
-UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
-  cl::desc("Minimize AVX to SSE transition penalty"),
-  cl::init(true));
-
 //===----------------------------------------------------------------------===//
 // X86 TTI query.
 //===----------------------------------------------------------------------===//
@@ -513,8 +505,7 @@ void X86PassConfig::addPreEmitPass() {
 
   addPass(createX86IndirectBranchTrackingPass());
 
-  if (UseVZeroUpper)
-    addPass(createX86IssueVZeroUpperPass());
+  addPass(createX86IssueVZeroUpperPass());
 
   if (getOptLevel() != CodeGenOpt::None) {
     addPass(createX86FixupBWInsts());

diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 7a8308ef1ba9..c188c7443625 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -39,6 +39,11 @@ using namespace llvm;
 
 #define DEBUG_TYPE "x86-vzeroupper"
 
+static cl::opt<bool>
+UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
+  cl::desc("Minimize AVX to SSE transition penalty"),
+  cl::init(true));
+
 STATISTIC(NumVZU, "Number of vzeroupper instructions inserted");
 
 namespace {
@@ -278,6 +283,9 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
 /// Loop over all of the basic blocks, inserting vzeroupper instructions before
 /// function calls.
 bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
+  if (!UseVZeroUpper)
+    return false;
+
   const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
   if (!ST.hasAVX() || !ST.insertVZEROUPPER())
     return false;


        

