[llvm-branch-commits] [llvm] 12b684a - [VectorCombine] improve readability; NFC

Sanjay Patel via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Dec 10 10:15:24 PST 2020


Author: Sanjay Patel
Date: 2020-12-10T13:10:26-05:00
New Revision: 12b684ae02226f7785d3fb412fb155d4e15cc9bd

URL: https://github.com/llvm/llvm-project/commit/12b684ae02226f7785d3fb412fb155d4e15cc9bd
DIFF: https://github.com/llvm/llvm-project/commit/12b684ae02226f7785d3fb412fb155d4e15cc9bd.diff

LOG: [VectorCombine] improve readability; NFC

If we are going to allow adjusting the pointer for GEPs,
rearranging the code a bit will make it easier to follow.
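
For context, the rewrite that vectorizeLoadInsert performs looks roughly like this in LLVM IR. The snippet is a hypothetical sketch (not part of the commit), assuming the target's minimum vector width is 128 bits; %p, %s, %r, and %vp are made-up names:

    ; before: a scalar load feeding an insert into element 0 of an undef vector
    %s = load float, float* %p, align 16
    %r = insertelement <4 x float> undef, float %s, i32 0

    ; after: load the same address directly as the minimum-width vector type
    %vp = bitcast float* %p to <4 x float>*
    %r = load <4 x float>, <4 x float>* %vp, align 16

The TODO in the diff below is about also matching a GEP with a constant offset as the load's pointer operand, which is presumably why the pointer-related code is being grouped together here.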

Added: 
    

Modified: 
    llvm/lib/Transforms/Vectorize/VectorCombine.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 0d0a338afca3..19f5a2b432f7 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -116,15 +116,16 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
     return false;
 
   // TODO: Extend this to match GEP with constant offsets.
-  Value *PtrOp = Load->getPointerOperand()->stripPointerCasts();
-  assert(isa<PointerType>(PtrOp->getType()) && "Expected a pointer type");
-  unsigned AS = Load->getPointerAddressSpace();
+  const DataLayout &DL = I.getModule()->getDataLayout();
+  Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
+  assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
 
   // If original AS != Load's AS, we can't bitcast the original pointer and have
   // to use Load's operand instead. Ideally we would want to strip pointer casts
   // without changing AS, but there's no API to do that ATM.
-  if (AS != PtrOp->getType()->getPointerAddressSpace())
-    PtrOp = Load->getPointerOperand();
+  unsigned AS = Load->getPointerAddressSpace();
+  if (AS != SrcPtr->getType()->getPointerAddressSpace())
+    SrcPtr = Load->getPointerOperand();
 
   Type *ScalarTy = Scalar->getType();
   uint64_t ScalarSize = ScalarTy->getPrimitiveSizeInBits();
@@ -136,11 +137,9 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   unsigned MinVecNumElts = MinVectorSize / ScalarSize;
   auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
   Align Alignment = Load->getAlign();
-  const DataLayout &DL = I.getModule()->getDataLayout();
-  if (!isSafeToLoadUnconditionally(PtrOp, MinVecTy, Alignment, DL, Load, &DT))
+  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Alignment, DL, Load, &DT))
     return false;
 
-
   // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
   Type *LoadTy = Load->getType();
   int OldCost = TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, AS);
@@ -159,7 +158,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // It is safe and potentially profitable to load a vector directly:
   // inselt undef, load Scalar, 0 --> load VecPtr
   IRBuilder<> Builder(Load);
-  Value *CastedPtr = Builder.CreateBitCast(PtrOp, MinVecTy->getPointerTo(AS));
+  Value *CastedPtr = Builder.CreateBitCast(SrcPtr, MinVecTy->getPointerTo(AS));
   Value *VecLd = Builder.CreateAlignedLoad(MinVecTy, CastedPtr, Alignment);
 
   // If the insert type does not match the target's minimum vector type,

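
As an aside on the address-space fallback in the first hunk: stripPointerCasts() also looks through addrspacecast, so the stripped pointer can live in a different address space than the load's. A hypothetical IR snippet (not from the commit) where that happens, with made-up names %g and %q:

    ; %g points into addrspace(1); the load itself uses an addrspace(0) pointer
    %q = addrspacecast float addrspace(1)* %g to float*
    %s = load float, float* %q, align 16
    %r = insertelement <4 x float> undef, float %s, i32 0

Here SrcPtr would be %g in addrspace(1), which cannot simply be bitcast to a vector pointer in the load's address space, so the code falls back to the load's own pointer operand (%q) before creating the bitcast.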

More information about the llvm-branch-commits mailing list