[llvm] 5a56a25 - [CodeGenPrepare] Make TargetPassConfig required

Fangrui Song via llvm-commits <llvm-commits at lists.llvm.org>
Sun Feb 2 09:28:57 PST 2020


Author: Fangrui Song
Date: 2020-02-02T09:28:45-08:00
New Revision: 5a56a25b0bd1279c41604a64c6a1bec3815b9b36

URL: https://github.com/llvm/llvm-project/commit/5a56a25b0bd1279c41604a64c6a1bec3815b9b36
DIFF: https://github.com/llvm/llvm-project/commit/5a56a25b0bd1279c41604a64c6a1bec3815b9b36.diff

LOG: [CodeGenPrepare] Make TargetPassConfig required

The code paths taken in the absence of a TargetMachine, TargetLowering, or
TargetRegisterInfo are poorly tested. As noted in rL285987, requiring
TargetPassConfig allows us to delete the many (untested) null checks littered
throughout the pass.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D73754
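
For context, the following sketch of a hypothetical legacy pass (illustrative
only, not code from this commit) shows the pattern adopted here: once
TargetPassConfig is declared as a required analysis, getAnalysis<TargetPassConfig>()
always yields a TargetMachine, so the TargetLowering pointer derived from it
no longer needs null checks.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

namespace {
// Hypothetical pass; the name and the isSlowDivBypassed() query are
// illustrative only.
struct RequireTPCExample : public FunctionPass {
  static char ID;
  RequireTPCExample() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Making the dependency required means the pass only runs inside a
    // codegen pipeline, where a TargetMachine is always available.
    AU.addRequired<TargetPassConfig>();
  }

  bool runOnFunction(Function &F) override {
    const TargetMachine &TM =
        getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    const TargetLowering *TLI = TM.getSubtargetImpl(F)->getTargetLowering();
    // TLI is guaranteed non-null here, so guards such as `if (TLI)` are
    // unnecessary in this style of pass.
    bool BypassSlowDiv = TLI->isSlowDivBypassed();
    (void)BypassSlowDiv; // query only; this example does not modify the IR
    return false;
  }
};
} // end anonymous namespace

char RequireTPCExample::ID = 0;

In the same spirit, the tests below that previously ran -codegenprepare
without a triple are moved from Generic directories into X86 or gain an
explicit -mtriple, so a TargetMachine is always present when the pass runs.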

Added: 
    llvm/test/CodeGen/X86/dont-remove-empty-preheader.ll
    llvm/test/DebugInfo/X86/codegenprep-value.ll
    llvm/test/DebugInfo/X86/sunk-compare.ll
    llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
    llvm/test/Transforms/CodeGenPrepare/X86/gep-unmerging.ll
    llvm/test/Transforms/CodeGenPrepare/X86/invariant.group.ll
    llvm/test/Transforms/CodeGenPrepare/X86/split-indirect-loop.ll
    llvm/test/Transforms/CodeGenPrepare/X86/widenable-condition.ll

Modified: 
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/test/CodeGen/AArch64/sve-vscale.ll
    llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
    llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll
    llvm/test/Transforms/CodeGenPrepare/X86/memset_chk-simplify-nobuiltin.ll

Removed: 
    llvm/test/CodeGen/Generic/dont-remove-empty-preheader.ll
    llvm/test/DebugInfo/Generic/codegenprep-value.ll
    llvm/test/DebugInfo/Generic/sunk-compare.ll
    llvm/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
    llvm/test/Transforms/CodeGenPrepare/gep-unmerging.ll
    llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
    llvm/test/Transforms/CodeGenPrepare/split-indirect-loop.ll
    llvm/test/Transforms/CodeGenPrepare/widenable-condition.ll


################################################################################
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 34731d3836bd..5dcda734ce3b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -327,6 +327,7 @@ class TypePromotionTransaction;
       // FIXME: When we can selectively preserve passes, preserve the domtree.
       AU.addRequired<ProfileSummaryInfoWrapperPass>();
       AU.addRequired<TargetLibraryInfoWrapperPass>();
+      AU.addRequired<TargetPassConfig>();
       AU.addRequired<TargetTransformInfoWrapperPass>();
       AU.addRequired<LoopInfoWrapperPass>();
     }
@@ -428,12 +429,10 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   InsertedInsts.clear();
   PromotedInsts.clear();
 
-  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
-    TM = &TPC->getTM<TargetMachine>();
-    SubtargetInfo = TM->getSubtargetImpl(F);
-    TLI = SubtargetInfo->getTargetLowering();
-    TRI = SubtargetInfo->getRegisterInfo();
-  }
+  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+  SubtargetInfo = TM->getSubtargetImpl(F);
+  TLI = SubtargetInfo->getTargetLowering();
+  TRI = SubtargetInfo->getRegisterInfo();
   TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
   TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
@@ -450,10 +449,9 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
 
   /// This optimization identifies DIV instructions that can be
   /// profitably bypassed and carried out with a shorter, faster divide.
-  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
-      TLI->isSlowDivBypassed()) {
+  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
     const DenseMap<unsigned int, unsigned int> &BypassWidths =
-       TLI->getBypassSlowDivWidths();
+        TLI->getBypassSlowDivWidths();
     BasicBlock* BB = &*F.begin();
     while (BB != nullptr) {
       // bypassSlowDivision may create new BBs, but we don't want to reapply the
@@ -1813,7 +1811,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
                                   const TargetLowering *TLI,
                                   const DataLayout *DL,
                                   bool &ModifiedDT) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;
 
   // If a zero input is undefined, it doesn't make sense to despeculate that.
@@ -1877,7 +1875,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Lower inline assembly if we can.
   // If we found an inline asm expression, and if the target knows how to
   // lower it to normal LLVM code, do so now.
-  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+  if (isa<InlineAsm>(CI->getCalledValue())) {
     if (TLI->ExpandInlineAsm(CI)) {
       // Avoid invalidating the iterator.
       CurInstIterator = BB->begin();
@@ -1894,7 +1892,7 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
   // Align the pointer arguments to this call if the target thinks it's a good
   // idea
   unsigned MinSize, PrefAlign;
-  if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
     for (auto &Arg : CI->arg_operands()) {
       // We want to align both objects whose address is used directly and
       // objects whose address is used in casts and GEPs, though it only makes
@@ -2028,17 +2026,15 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
     }
     }
 
-    if (TLI) {
-      SmallVector<Value*, 2> PtrOps;
-      Type *AccessTy;
-      if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
-        while (!PtrOps.empty()) {
-          Value *PtrVal = PtrOps.pop_back_val();
-          unsigned AS = PtrVal->getType()->getPointerAddressSpace();
-          if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
-            return true;
-        }
-    }
+    SmallVector<Value *, 2> PtrOps;
+    Type *AccessTy;
+    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
+      while (!PtrOps.empty()) {
+        Value *PtrVal = PtrOps.pop_back_val();
+        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
+          return true;
+      }
   }
 
   // From here on out we're working with named functions.
@@ -2089,9 +2085,6 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
 ///   ret i32 %tmp2
 /// @endcode
 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
-  if (!TLI)
-    return false;
-
   ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
   if (!RetI)
     return false;
@@ -4907,7 +4900,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
   } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
-                                   TM && SubtargetInfo->addrSinkUsingGEPs())) {
+                                   SubtargetInfo->addrSinkUsingGEPs())) {
     // By default, we use the GEP-based method when AA is used later. This
     // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
@@ -5258,7 +5251,7 @@ bool CodeGenPrepare::tryToPromoteExts(
     // this check inside the for loop is to catch the case where an extension
     // is directly fed by a load because in such case the extension can be moved
     // up without any promotion on its operands.
-    if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
+    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
       return false;
 
     // Get the action to perform the promotion.
@@ -5583,11 +5576,6 @@ bool CodeGenPrepare::canFormExtLd(
 /// \p Inst[in/out] the extension may be modified during the process if some
 /// promotions apply.
 bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
-  // ExtLoad formation and address type promotion infrastructure requires TLI to
-  // be effective.
-  if (!TLI)
-    return false;
-
   bool AllowPromotionWithoutCommonHeader = false;
   /// See if it is an interesting sext operations for the address type
   /// promotion before trying to promote it, e.g., the ones with the right
@@ -5717,7 +5705,7 @@ bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
     return false;
 
   // Only do this xform if truncating is free.
-  if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
+  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
     return false;
 
   // Only safe to perform the optimization if the source is also defined in
@@ -6064,9 +6052,8 @@ bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
 /// turn it into a branch.
 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
   // If branch conversion isn't desirable, exit early.
-  if (DisableSelectToBranch ||
-      OptSize || llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()) ||
-      !TLI)
+  if (DisableSelectToBranch || OptSize ||
+      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))
     return false;
 
   // Find all consecutive select instructions that share the same condition.
@@ -6252,7 +6239,7 @@ bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   BasicBlock *DefBB = SVI->getParent();
 
   // Only do this xform if variable vector shifts are particularly expensive.
-  if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
+  if (!TLI->isVectorShiftByScalarCheap(SVI->getType()))
     return false;
 
   // We only expect better codegen by sinking a shuffle if we can recognise a
@@ -6304,7 +6291,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
   // If the operands of I can be folded into a target instruction together with
   // I, duplicate and sink them.
   SmallVector<Use *, 4> OpsToSink;
-  if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink))
+  if (!TLI->shouldSinkOperands(I, OpsToSink))
     return false;
 
   // OpsToSink can contain multiple uses in a use chain (e.g.
@@ -6357,7 +6344,7 @@ bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
 }
 
 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
-  if (!TLI || !DL)
+  if (!DL)
     return false;
 
   Value *Cond = SI->getCondition();
@@ -6723,7 +6710,7 @@ void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
 /// has this feature and this is profitable.
 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
   unsigned CombineCost = std::numeric_limits<unsigned>::max();
-  if (DisableStoreExtract || !TLI ||
+  if (DisableStoreExtract ||
       (!StressStoreExtract &&
        !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
                                        Inst->getOperand(1), CombineCost)))
@@ -7092,16 +7079,15 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
     if (isa<Constant>(CI->getOperand(0)))
       return false;
 
-    if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
+    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
       return true;
 
     if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
       /// Sink a zext or sext into its user blocks if the target type doesn't
       /// fit in one register
-      if (TLI &&
-          TLI->getTypeAction(CI->getContext(),
+      if (TLI->getTypeAction(CI->getContext(),
                              TLI->getValueType(*DL, CI->getType())) ==
-              TargetLowering::TypeExpandInteger) {
+          TargetLowering::TypeExpandInteger) {
         return SinkCast(CI);
       } else {
         bool MadeChange = optimizeExt(I);
@@ -7112,30 +7098,24 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
   }
 
   if (auto *Cmp = dyn_cast<CmpInst>(I))
-    if (TLI && optimizeCmp(Cmp, ModifiedDT))
+    if (optimizeCmp(Cmp, ModifiedDT))
       return true;
 
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      bool Modified = optimizeLoadExt(LI);
-      unsigned AS = LI->getPointerAddressSpace();
-      Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
-      return Modified;
-    }
-    return false;
+    bool Modified = optimizeLoadExt(LI);
+    unsigned AS = LI->getPointerAddressSpace();
+    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
+    return Modified;
   }
 
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (TLI && splitMergedValStore(*SI, *DL, *TLI))
+    if (splitMergedValStore(*SI, *DL, *TLI))
       return true;
     SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
-    if (TLI) {
-      unsigned AS = SI->getPointerAddressSpace();
-      return optimizeMemoryInst(I, SI->getOperand(1),
-                                SI->getOperand(0)->getType(), AS);
-    }
-    return false;
+    unsigned AS = SI->getPointerAddressSpace();
+    return optimizeMemoryInst(I, SI->getOperand(1),
+                              SI->getOperand(0)->getType(), AS);
   }
 
   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
@@ -7152,15 +7132,14 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
 
   BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
 
-  if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
-      EnableAndCmpSinking && TLI)
+  if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
     return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
 
   // TODO: Move this into the switch on opcode - it handles shifts already.
   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                 BinOp->getOpcode() == Instruction::LShr)) {
     ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
-    if (TLI && CI && TLI->hasExtractBitsInsn())
+    if (CI && TLI->hasExtractBitsInsn())
       if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
         return true;
   }
@@ -7239,7 +7218,7 @@ bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
   }
 
   bool MadeBitReverse = true;
-  while (TLI && MadeBitReverse) {
+  while (MadeBitReverse) {
     MadeBitReverse = false;
     for (auto &I : reverse(BB)) {
       if (makeBitReverse(I, *DL, *TLI)) {
@@ -7351,7 +7330,7 @@ static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
 ///
 bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
-  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
+  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
     return false;
 
   bool MadeChange = false;

diff --git a/llvm/test/CodeGen/AArch64/sve-vscale.ll b/llvm/test/CodeGen/AArch64/sve-vscale.ll
index d156b1997e59..3415d188feff 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale.ll
@@ -1,5 +1,5 @@
 ; RUN: llc -mtriple aarch64 -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
-; RUN: opt -codegenprepare -S < %s | llc -mtriple aarch64 -mattr=+sve -asm-verbose=0 | FileCheck %s
+; RUN: opt -mtriple=aarch64 -codegenprepare -S < %s | llc -mtriple=aarch64 -mattr=+sve -asm-verbose=0 | FileCheck %s
 
 ;
 ; RDVL

diff --git a/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll b/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
index e40817d2e32b..b93080e896fa 100644
--- a/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
+++ b/llvm/test/CodeGen/PowerPC/splitstore-check-volatile.ll
@@ -1,6 +1,6 @@
 ; Test that CodeGenPrepare respect the volatile flag when splitting a store.
 ;
-; RUN: opt -S -codegenprepare -force-split-store < %s  | FileCheck %s
+; RUN: opt -S -mtriple=powerpc64le -codegenprepare -force-split-store < %s  | FileCheck %s
 
 define void @fun(i16* %Src, i16* %Dst) {
 ; CHECK: store volatile i16 %8, i16* %Dst 

diff --git a/llvm/test/CodeGen/Generic/dont-remove-empty-preheader.ll b/llvm/test/CodeGen/X86/dont-remove-empty-preheader.ll
similarity index 95%
rename from llvm/test/CodeGen/Generic/dont-remove-empty-preheader.ll
rename to llvm/test/CodeGen/X86/dont-remove-empty-preheader.ll
index 36af1ffa8bad..c5d32bfd7cec 100644
--- a/llvm/test/CodeGen/Generic/dont-remove-empty-preheader.ll
+++ b/llvm/test/CodeGen/X86/dont-remove-empty-preheader.ll
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -mtriple=x86_64 -codegenprepare -S < %s | FileCheck %s
 ; CHECK: for.body.preheader
 
 @N = common global i32 0, align 4

diff --git a/llvm/test/DebugInfo/Generic/codegenprep-value.ll b/llvm/test/DebugInfo/X86/codegenprep-value.ll
similarity index 97%
rename from llvm/test/DebugInfo/Generic/codegenprep-value.ll
rename to llvm/test/DebugInfo/X86/codegenprep-value.ll
index cf394383b10b..52087b619dc8 100644
--- a/llvm/test/DebugInfo/Generic/codegenprep-value.ll
+++ b/llvm/test/DebugInfo/X86/codegenprep-value.ll
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64 -codegenprepare %s | FileCheck %s
 ;
 ; Generated from the following source with:
 ; clang -O2 -g -S -emit-llvm -mllvm -stop-after=indirectbr-expand test.cpp

diff --git a/llvm/test/DebugInfo/Generic/sunk-compare.ll b/llvm/test/DebugInfo/X86/sunk-compare.ll
similarity index 96%
rename from llvm/test/DebugInfo/Generic/sunk-compare.ll
rename to llvm/test/DebugInfo/X86/sunk-compare.ll
index 279887b31d1f..5ca164ccb26e 100644
--- a/llvm/test/DebugInfo/Generic/sunk-compare.ll
+++ b/llvm/test/DebugInfo/X86/sunk-compare.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64 -codegenprepare < %s | FileCheck %s
 ;
 ; This test case has been generated by hand but is inspired by the
 ; observation that compares that are sunk into the basic blocks where

diff --git a/llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll b/llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll
index cb316cba0316..a6a8e101108d 100644
--- a/llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll
+++ b/llvm/test/Other/2007-04-24-eliminate-mostly-empty-blocks.ll
@@ -1,4 +1,4 @@
-;RUN: opt < %s -codegenprepare -S | FileCheck %s
+;RUN: opt < %s -codegenprepare -S -mtriple=x86_64 | FileCheck %s
 
 ;CHECK: define void @foo()
 ;CHECK-NEXT: entry:

diff --git a/llvm/test/Transforms/CodeGenPrepare/bitreverse-hang.ll b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
similarity index 95%
rename from llvm/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
rename to llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
index 4abc57d84730..0ad9261df0c5 100644
--- a/llvm/test/Transforms/CodeGenPrepare/bitreverse-hang.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -loop-unroll -codegenprepare -S | FileCheck %s
+; RUN: opt < %s -loop-unroll -codegenprepare -S -mtriple=x86_64 | FileCheck %s
 
 ; This test is a worst-case scenario for bitreversal/byteswap detection.
 ; After loop unrolling (the unrolled loop is unreadably large so it has been kept

diff --git a/llvm/test/Transforms/CodeGenPrepare/gep-unmerging.ll b/llvm/test/Transforms/CodeGenPrepare/X86/gep-unmerging.ll
similarity index 88%
rename from llvm/test/Transforms/CodeGenPrepare/gep-unmerging.ll
rename to llvm/test/Transforms/CodeGenPrepare/X86/gep-unmerging.ll
index 097567565518..83b45aa8bbba 100644
--- a/llvm/test/Transforms/CodeGenPrepare/gep-unmerging.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/gep-unmerging.ll
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s
 
 @exit_addr = constant i8* blockaddress(@gep_unmerging, %exit)
 @op1_addr = constant i8* blockaddress(@gep_unmerging, %op1)
@@ -25,8 +25,8 @@ entry:
 
 op1:
 ; CHECK-LABEL: op1:
-; CHECK-NEXT: %p1_inc2 = getelementptr i8, i8* %p_postinc, i64 2
-; CHECK-NEXT: %p1_inc1 = getelementptr i8, i8* %p_postinc, i64 1
+; CHECK-NEXT: %p1_inc2 = getelementptr i8, i8* %p_preinc, i64 3
+; CHECK-NEXT: %p1_inc1 = getelementptr i8, i8* %p_preinc, i64 2
   %p1_inc2 = getelementptr i8, i8* %p_preinc, i64 3
   %p1_inc1 = getelementptr i8, i8* %p_preinc, i64 2
   %a10 = load i8, i8* %p_postinc
@@ -37,7 +37,7 @@ op1:
 
 op2:
 ; CHECK-LABEL: op2:
-; CHECK-NEXT: %p2_inc = getelementptr i8, i8* %p_postinc, i64 1
+; CHECK-NEXT: %p2_inc = getelementptr i8, i8* %p_preinc, i64 2
   %p2_inc = getelementptr i8, i8* %p_preinc, i64 2
   %a2 = load i8, i8* %p_postinc
   store i8 %a2, i8* @dummy

diff --git a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll b/llvm/test/Transforms/CodeGenPrepare/X86/invariant.group.ll
similarity index 93%
rename from llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
rename to llvm/test/Transforms/CodeGenPrepare/X86/invariant.group.ll
index 29ff724f036e..01273135ef51 100644
--- a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/invariant.group.ll
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s
 
 @tmp = global i8 0
 

diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/memset_chk-simplify-nobuiltin.ll b/llvm/test/Transforms/CodeGenPrepare/X86/memset_chk-simplify-nobuiltin.ll
index 09545e268d77..cd982f6c3f9d 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/memset_chk-simplify-nobuiltin.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/memset_chk-simplify-nobuiltin.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -disable-simplify-libcalls -codegenprepare < %s | FileCheck %s
+; RUN: opt -S -mtriple=x86_64 -disable-simplify-libcalls -codegenprepare < %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 
 ; This is a workaround for PR23093: when building with -mkernel/-fno-builtin,

diff --git a/llvm/test/Transforms/CodeGenPrepare/split-indirect-loop.ll b/llvm/test/Transforms/CodeGenPrepare/X86/split-indirect-loop.ll
similarity index 92%
rename from llvm/test/Transforms/CodeGenPrepare/split-indirect-loop.ll
rename to llvm/test/Transforms/CodeGenPrepare/X86/split-indirect-loop.ll
index cb834bb5dd8f..e5caca67d6cf 100644
--- a/llvm/test/Transforms/CodeGenPrepare/split-indirect-loop.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/split-indirect-loop.ll
@@ -1,4 +1,4 @@
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s
 
 ; Test that an invalid CFG is not created by splitIndirectCriticalEdges
 ; transformation when the 'target' block is a loop to itself.

diff --git a/llvm/test/Transforms/CodeGenPrepare/widenable-condition.ll b/llvm/test/Transforms/CodeGenPrepare/X86/widenable-condition.ll
similarity index 97%
rename from llvm/test/Transforms/CodeGenPrepare/widenable-condition.ll
rename to llvm/test/Transforms/CodeGenPrepare/X86/widenable-condition.ll
index a12b87ff1158..b26876e0e1e2 100644
--- a/llvm/test/Transforms/CodeGenPrepare/widenable-condition.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/widenable-condition.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -codegenprepare -S < %s | FileCheck %s
+; RUN: opt -codegenprepare -S -mtriple=x86_64 < %s | FileCheck %s
 
 ; Check the idiomatic guard pattern to ensure it's lowered correctly.
 define void @test_guard(i1 %cond_0) {
