[llvm] 1e34ab9 - [Alignment][NFC] Add DebugStr and operator*

Guillaume Chatelet via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 6 00:16:56 PDT 2020


Author: Guillaume Chatelet
Date: 2020-04-06T07:12:46Z
New Revision: 1e34ab98fc6f5ea7e264c0cd19d96b87cbd9c8a5

URL: https://github.com/llvm/llvm-project/commit/1e34ab98fc6f5ea7e264c0cd19d96b87cbd9c8a5
DIFF: https://github.com/llvm/llvm-project/commit/1e34ab98fc6f5ea7e264c0cd19d96b87cbd9c8a5.diff

LOG: [Alignment][NFC] Add DebugStr and operator*

Summary:
Also updates files to use them.

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
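
For illustration, a minimal usage sketch of the two additions follows (the
function name, DEBUG_TYPE string, and scaling factor are hypothetical and not
taken from the patch): operator* scales an Align/MaybeAlign by a positive
integer factor, and DebugStr renders the values for LLVM_DEBUG output.

    #include "llvm/Support/Alignment.h"
    #include "llvm/Support/Debug.h"
    #define DEBUG_TYPE "alignment-example" // hypothetical debug type

    using namespace llvm;

    // Hypothetical helper showing the new operator* and DebugStr in use.
    static Align scaleAndDump(Align A, MaybeAlign MA) {
      // Factor must be positive; for Align the product must stay a power of 2.
      Align Scaled = A * 2;            // Align(A.value() * 2)
      MaybeAlign MaybeScaled = MA * 2; // stays MaybeAlign(None) if MA is None
      // DebugStr yields strings such as "Align(16)" or "MaybeAlign(None)";
      // both it and LLVM_DEBUG are only active in asserts-enabled builds.
      LLVM_DEBUG(dbgs() << DebugStr(Scaled) << " vs " << DebugStr(MaybeScaled)
                        << '\n');
      (void)MaybeScaled; // silence unused-variable warnings in release builds
      return Scaled;
    }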

Reviewers: courbet

Subscribers: sdardis, hiraditya, jrtc27, atanasyan, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77394

Added: 
    

Modified: 
    llvm/include/llvm/Support/Alignment.h
    llvm/lib/CodeGen/MachineFrameInfo.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/Target/Mips/MipsRegisterInfo.cpp
    llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
    llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h
index cf5f151b5199..9f17bc063d37 100644
--- a/llvm/include/llvm/Support/Alignment.h
+++ b/llvm/include/llvm/Support/Alignment.h
@@ -395,6 +395,16 @@ inline bool operator>(MaybeAlign Lhs, Align Rhs) {
   return Lhs && (*Lhs).value() > Rhs.value();
 }
 
+inline Align operator*(Align Lhs, uint64_t Rhs) {
+  assert(Rhs > 0 && "Rhs must be positive");
+  return Align(Lhs.value() * Rhs);
+}
+
+inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
+  assert(Rhs > 0 && "Rhs must be positive");
+  return Lhs ? Lhs.getValue() * Rhs : MaybeAlign();
+}
+
 inline Align operator/(Align Lhs, uint64_t Divisor) {
   assert(llvm::isPowerOf2_64(Divisor) &&
          "Divisor must be positive and a power of 2");
@@ -416,6 +426,19 @@ inline Align max(Align Lhs, MaybeAlign Rhs) {
   return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
 }
 
+#ifndef NDEBUG
+// For usage in LLVM_DEBUG macros.
+inline std::string DebugStr(const Align &A) {
+  return "Align(" + std::to_string(A.value()) + ")";
+}
+// For usage in LLVM_DEBUG macros.
+inline std::string DebugStr(const MaybeAlign &MA) {
+  if (MA)
+    return "MaybeAlign(" + std::to_string(MA->value()) + ")";
+  return "MaybeAlign(None)";
+}
+#endif
+
 #undef ALIGN_CHECK_ISPOSITIVE
 #undef ALIGN_CHECK_ISSET
 

diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
index 7690505dabe3..7ba27ff1c856 100644
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -41,8 +41,9 @@ static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
                                         Align StackAlignment) {
   if (!ShouldClamp || Alignment <= StackAlignment)
     return Alignment;
-  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
-                    << " exceeds the stack alignment " << StackAlignment.value()
+  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << DebugStr(Alignment)
+                    << " exceeds the stack alignment "
+                    << DebugStr(StackAlignment)
                     << " when stack realignment is off" << '\n');
   return StackAlignment;
 }

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 823e2a83c965..1cc8110c95e2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -9498,8 +9498,8 @@ static void tryToElideArgumentCopy(
   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
                          "greater than stack argument alignment ("
-                      << RequiredAlignment.value() << " vs "
-                      << MFI.getObjectAlign(FixedIndex).value() << ")\n");
+                      << DebugStr(RequiredAlignment) << " vs "
+                      << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
     return;
   }
 

diff --git a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 08967a534bf9..3452bf495a34 100644
--- a/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -266,7 +266,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
                     << "spOffset   : " << spOffset << "\n"
                     << "stackSize  : " << stackSize << "\n"
                     << "alignment  : "
-                    << MF.getFrameInfo().getObjectAlign(FrameIndex).value()
+                    << DebugStr(MF.getFrameInfo().getObjectAlign(FrameIndex))
                     << "\n");
 
   eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);

diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 82de8edaef13..be1679ad9f28 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -297,9 +297,7 @@ class DataFlowSanitizer : public ModulePass {
   friend struct DFSanFunction;
   friend class DFSanVisitor;
 
-  enum {
-    ShadowWidth = 16
-  };
+  enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 };
 
   /// Which ABI should be used for instrumented functions?
   enum InstrumentedABI {
@@ -577,11 +575,11 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
 
   Mod = &M;
   Ctx = &M.getContext();
-  ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
+  ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
   ShadowPtrTy = PointerType::getUnqual(ShadowTy);
   IntptrTy = DL.getIntPtrType(*Ctx);
   ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
-  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
+  ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
   if (IsX86_64)
     ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
   else if (IsMIPS64)
@@ -1238,7 +1236,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
     }
   }
 
-  const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8);
+  const MaybeAlign ShadowAlign(Align * DFS.ShadowWidthBytes);
   SmallVector<const Value *, 2> Objs;
   GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
   bool AllConstants = true;
@@ -1272,7 +1270,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
         IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
   }
   }
-  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
+  if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
     // Fast path for the common case where each byte has identical shadow: load
     // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
     // shadow is non-equal.
@@ -1284,15 +1282,15 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
     FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
 
     // Compare each of the shadows stored in the loaded 64 bits to each other,
-    // by computing (WideShadow rotl ShadowWidth) == WideShadow.
+    // by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
     IRBuilder<> IRB(Pos);
     Value *WideAddr =
         IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
     Value *WideShadow =
         IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
     Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
-    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
-    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
+    Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
+    Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
     Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
     Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
 
@@ -1315,8 +1313,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
     ReplaceInstWithInst(Head->getTerminator(), LastBr);
     DT.addNewBlock(FallbackBB, Head);
 
-    for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
-         Ofs += 64 / DFS.ShadowWidth) {
+    for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
+         Ofs += 64 / DFS.ShadowWidthBits) {
       BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
       DT.addNewBlock(NextBB, LastBr->getParent());
       IRBuilder<> NextIRB(NextBB);
@@ -1386,11 +1384,12 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
     }
   }
 
-  const Align ShadowAlign(Alignment.value() * (DFS.ShadowWidth / 8));
+  const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
   IRBuilder<> IRB(Pos);
   Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
   if (Shadow == DFS.ZeroShadow) {
-    IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
+    IntegerType *ShadowTy =
+        IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
     Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
     Value *ExtShadowAddr =
         IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
@@ -1398,7 +1397,7 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
     return;
   }
 
-  const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
+  const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
   uint64_t Offset = 0;
   if (Size >= ShadowVecSize) {
     VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
@@ -1548,9 +1547,9 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
   IRBuilder<> IRB(&I);
   Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
   Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
-  Value *LenShadow = IRB.CreateMul(
-      I.getLength(),
-      ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
+  Value *LenShadow =
+      IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
+                                                    DFSF.DFS.ShadowWidthBytes));
   Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
   Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
   SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
@@ -1558,11 +1557,11 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
       IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
                      {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
   if (ClPreserveAlignment) {
-    MTI->setDestAlignment(I.getDestAlignment() * (DFSF.DFS.ShadowWidth / 8));
-    MTI->setSourceAlignment(I.getSourceAlignment() * (DFSF.DFS.ShadowWidth / 8));
+    MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
+    MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
   } else {
-    MTI->setDestAlignment(DFSF.DFS.ShadowWidth / 8);
-    MTI->setSourceAlignment(DFSF.DFS.ShadowWidth / 8);
+    MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
+    MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
   }
   if (ClEventCallbacks) {
     IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,

diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 18715d8e7fd6..ff831eb71c98 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -90,9 +90,9 @@ FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
 // to a constant. Using SCEV to compute alignment handles the case where
 // DiffSCEV is a recurrence with constant start such that the aligned offset
 // is constant. e.g. {16,+,32} % 32 -> 16.
-static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
-                                    const SCEV *AlignSCEV,
-                                    ScalarEvolution *SE) {
+static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
+                                      const SCEV *AlignSCEV,
+                                      ScalarEvolution *SE) {
   // DiffUnits = Diff % int64_t(Alignment)
   const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);
 
@@ -107,25 +107,24 @@ static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
     // displaced pointer has the same alignment as the aligned pointer, so
     // return the alignment value.
     if (!DiffUnits)
-      return (unsigned)
-        cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue();
+      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();
 
     // If the displacement is not an exact multiple, but the remainder is a
     // constant, then return this remainder (but only if it is a power of 2).
     uint64_t DiffUnitsAbs = std::abs(DiffUnits);
     if (isPowerOf2_64(DiffUnitsAbs))
-      return (unsigned) DiffUnitsAbs;
+      return Align(DiffUnitsAbs);
   }
 
-  return 0;
+  return None;
 }
 
 // There is an address given by an offset OffSCEV from AASCEV which has an
 // alignment AlignSCEV. Use that information, if possible, to compute a new
 // alignment for Ptr.
-static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
-                                const SCEV *OffSCEV, Value *Ptr,
-                                ScalarEvolution *SE) {
+static MaybeAlign getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
+                                  const SCEV *OffSCEV, Value *Ptr,
+                                  ScalarEvolution *SE) {
   const SCEV *PtrSCEV = SE->getSCEV(Ptr);
   // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
   // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
@@ -146,13 +145,12 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                     << *AlignSCEV << " and offset " << *OffSCEV
                     << " using 
diff  " << *DiffSCEV << "\n");
 
-  unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE);
-  LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");
+  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
+    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
+    return *NewAlignment;
+  }
 
-  if (NewAlignment) {
-    return NewAlignment;
-  } else if (const SCEVAddRecExpr *DiffARSCEV =
-             dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
+  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
     // The relative offset to the alignment assumption did not yield a constant,
     // but we should try harder: if we assume that a is 32-byte aligned, then in
     // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are
@@ -170,34 +168,34 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
     // first iteration, and also the alignment using the per-iteration delta.
     // If these are the same, then use that answer. Otherwise, use the smaller
     // one, but only if it divides the larger one.
-    NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
-    unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);
-
-    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
-    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");
-
-    if (!NewAlignment || !NewIncAlignment) {
-      return 0;
-    } else if (NewAlignment > NewIncAlignment) {
-      if (NewAlignment % NewIncAlignment == 0) {
-        LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment
-                          << "\n");
-        return NewIncAlignment;
-      }
-    } else if (NewIncAlignment > NewAlignment) {
-      if (NewIncAlignment % NewAlignment == 0) {
-        LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
-                          << "\n");
-        return NewAlignment;
-      }
-    } else if (NewIncAlignment == NewAlignment) {
-      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
-                        << "\n");
+    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
+    MaybeAlign NewIncAlignment =
+        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);
+
+    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
+                      << "\n");
+    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
+                      << "\n");
+
+    // Both None or set to the same value.
+    if (NewAlignment == NewIncAlignment) {
+      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
+                        << DebugStr(NewAlignment) << "\n");
+      return NewAlignment;
+    }
+    if (NewAlignment > NewIncAlignment) {
+      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
+                        << DebugStr(NewIncAlignment) << "\n");
+      return NewIncAlignment;
+    }
+    if (NewIncAlignment > NewAlignment) {
+      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
+                        << DebugStr(NewAlignment) << "\n");
       return NewAlignment;
     }
   }
 
-  return 0;
+  return None;
 }
 
 bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
@@ -323,26 +321,27 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
     Instruction *J = WorkList.pop_back_val();
 
     if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
-      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
-        LI->getPointerOperand(), SE);
+      MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
+                                                LI->getPointerOperand(), SE);
 
       if (NewAlignment > LI->getAlignment()) {
         LI->setAlignment(MaybeAlign(NewAlignment));
         ++NumLoadAlignChanged;
       }
     } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
-      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
-        SI->getPointerOperand(), SE);
+      MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
+                                                SI->getPointerOperand(), SE);
 
       if (NewAlignment > SI->getAlignment()) {
         SI->setAlignment(MaybeAlign(NewAlignment));
         ++NumStoreAlignChanged;
       }
     } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
-      unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
-        MI->getDest(), SE);
+      MaybeAlign NewDestAlignment =
+          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);
 
-      LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";);
+      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
+                        << "\n";);
       if (NewDestAlignment > MI->getDestAlignment()) {
         MI->setDestAlignment(NewDestAlignment);
         ++NumMemIntAlignChanged;
@@ -351,10 +350,11 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
       // For memory transfers, there is also a source alignment that
       // can be set.
       if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
-        unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
-          MTI->getSource(), SE);
+        MaybeAlign NewSrcAlignment =
+            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);
 
-        LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";);
+        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
+                          << "\n";);
 
         if (NewSrcAlignment > MTI->getSourceAlignment()) {
           MTI->setSourceAlignment(NewSrcAlignment);


        

