[llvm] f26bdb5 - Make Value::getPointerAlignment() return an Align, not a MaybeAlign.

Eli Friedman via llvm-commits <llvm-commits@lists.llvm.org>
Wed May 20 16:37:40 PDT 2020


Author: Eli Friedman
Date: 2020-05-20T16:37:20-07:00
New Revision: f26bdb539e8acef23e2a0370408521a6458001ee

URL: https://github.com/llvm/llvm-project/commit/f26bdb539e8acef23e2a0370408521a6458001ee
DIFF: https://github.com/llvm/llvm-project/commit/f26bdb539e8acef23e2a0370408521a6458001ee.diff

LOG: Make Value::getPointerAlignment() return an Align, not a MaybeAlign.

If we don't know anything about the alignment of a pointer, Align(1) is
still correct: all pointers are at least 1-byte aligned.
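
A minimal sketch of what this means for callers (the helper name below is
hypothetical, not part of this patch): the "at least 1 byte" fallback now
lives in the callee, so the MaybeAlign unwrapping boilerplate goes away.

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Old pattern: callers unwrapped the MaybeAlign themselves, e.g.
    //   Align A = Ptr->getPointerAlignment(DL).valueOrOne();
    // New pattern: the result is always a valid Align (>= 1).
    static Align getKnownAlign(const Value *Ptr, const DataLayout &DL) {
      return Ptr->getPointerAlignment(DL);
    }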

Included in this patch is a bugfix for an issue discovered during this
cleanup: pointers with "dereferenceable" attributes/metadata were
assumed to be aligned to the ABI alignment of the pointee type. This
wasn't intentional, as far as I can tell, so Loads.cpp was fixed to
stop making that assumption. Frontends may need to be updated to emit
explicit "align" attributes or metadata; I updated clang's handling of
C++ references and added a release note for this.
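
As a rough illustration of the frontend-side update (a sketch only; the
helper below is hypothetical, though the attribute APIs are existing LLVM
C++ APIs), a dereferenceable parameter should now also carry an explicit
align attribute, as the updated tests in this patch do (e.g.
"dereferenceable(8) align 4"):

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // Pair dereferenceable with an explicit align attribute; dereferenceable
    // alone no longer implies any alignment.
    static void annotateRefParam(Function &F, unsigned ArgNo, uint64_t Bytes,
                                 Align A) {
      LLVMContext &Ctx = F.getContext();
      F.addParamAttr(ArgNo, Attribute::getWithDereferenceableBytes(Ctx, Bytes));
      F.addParamAttr(ArgNo, Attribute::getWithAlignment(Ctx, A));
      // For an i32* parameter this yields IR like:
      //   i32* dereferenceable(4) align 4 %p
    }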

Differential Revision: https://reviews.llvm.org/D80072

Added: 
    

Modified: 
    llvm/docs/ReleaseNotes.rst
    llvm/include/llvm/IR/Value.h
    llvm/lib/Analysis/Loads.cpp
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/CodeGen/ExpandMemCmp.cpp
    llvm/lib/IR/ConstantFold.cpp
    llvm/lib/IR/Value.cpp
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/lib/Transforms/IPO/AttributorAttributes.cpp
    llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
    llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
    llvm/test/Transforms/GVN/PRE/pre-load.ll
    llvm/test/Transforms/InstCombine/call-guard.ll
    llvm/test/Transforms/InstCombine/masked_intrinsics.ll
    llvm/test/Transforms/InstCombine/select.ll
    llvm/test/Transforms/LICM/hoist-deref-load.ll
    llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
    llvm/test/Transforms/TailCallElim/reorder_load.ll
    llvm/unittests/IR/FunctionTest.cpp
    polly/test/ScopInfo/invariant_load_dereferenceable.ll

Removed: 
    


################################################################################
diff  --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index e0ff25622704..b47222eefe8a 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -64,6 +64,12 @@ Changes to the LLVM IR
   provided by the attribute is interfaced via the API provided by the
   ``VFDatabase`` class.
 
+* `dereferenceable` attributes and metadata on pointers no longer imply
+  anything about the alignment of the pointer in question. Previously, some
+  optimizations would make assumptions based on the type of the pointer. This
+  behavior was undocumented. To preserve optimizations, frontends may need to
+  be updated to generate appropriate `align` attributes and metadata.
+
 Changes to building LLVM
 ------------------------
 

diff  --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
index 47f311f1ead6..4244698ba00f 100644
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -664,7 +664,7 @@ class Value {
   ///
   /// Returns an alignment which is either specified explicitly, e.g. via
   /// align attribute of a function argument, or guaranteed by DataLayout.
-  MaybeAlign getPointerAlignment(const DataLayout &DL) const;
+  Align getPointerAlignment(const DataLayout &DL) const;
 
   /// Translate PHI node to its predecessor from the given basic block.
   ///

diff  --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index e72ea9ba20d2..470a45368087 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -27,24 +27,12 @@
 
 using namespace llvm;
 
-static MaybeAlign getBaseAlign(const Value *Base, const DataLayout &DL) {
-  if (const MaybeAlign PA = Base->getPointerAlignment(DL))
-    return *PA;
-  Type *const Ty = Base->getType()->getPointerElementType();
-  if (!Ty->isSized())
-    return None;
-  return Align(DL.getABITypeAlignment(Ty));
-}
-
 static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                       const DataLayout &DL) {
-  if (MaybeAlign BA = getBaseAlign(Base, DL)) {
-    const APInt APBaseAlign(Offset.getBitWidth(), BA->value());
-    const APInt APAlign(Offset.getBitWidth(), Alignment.value());
-    assert(APAlign.isPowerOf2() && "must be a power of 2!");
-    return APBaseAlign.uge(APAlign) && !(Offset & (APAlign - 1));
-  }
-  return false;
+  Align BA = Base->getPointerAlignment(DL);
+  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
+  assert(APAlign.isPowerOf2() && "must be a power of 2!");
+  return BA >= Alignment && !(Offset & (APAlign - 1));
 }
 
 /// Test if V is always a pointer to allocated and suitably aligned memory for

diff  --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 8d69df3dec4e..a5fb6fba4d5a 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2008,9 +2008,8 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
 
   // Aligned pointers have trailing zeros - refine Known.Zero set
   if (isa<PointerType>(V->getType())) {
-    const MaybeAlign Align = V->getPointerAlignment(Q.DL);
-    if (Align)
-      Known.Zero.setLowBits(countTrailingZeros(Align->value()));
+    Align Alignment = V->getPointerAlignment(Q.DL);
+    Known.Zero.setLowBits(countTrailingZeros(Alignment.value()));
   }
 
   // computeKnownBitsFromAssume strictly refines Known.

diff  --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index 9bc7bb93dc6b..04433ed2f0de 100644
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -273,8 +273,8 @@ MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
   // Get the memory source at offset `OffsetBytes`.
   Value *LhsSource = CI->getArgOperand(0);
   Value *RhsSource = CI->getArgOperand(1);
-  Align LhsAlign = LhsSource->getPointerAlignment(DL).valueOrOne();
-  Align RhsAlign = RhsSource->getPointerAlignment(DL).valueOrOne();
+  Align LhsAlign = LhsSource->getPointerAlignment(DL);
+  Align RhsAlign = RhsSource->getPointerAlignment(DL);
   if (OffsetBytes > 0) {
     auto *ByteType = Type::getInt8Ty(CI->getContext());
     LhsSource = Builder.CreateConstGEP1_64(

diff  --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 8539b91faf11..d090eaac3fb5 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1210,7 +1210,8 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
           MaybeAlign GVAlign;
 
           if (Module *TheModule = GV->getParent()) {
-            GVAlign = GV->getPointerAlignment(TheModule->getDataLayout());
+            const DataLayout &DL = TheModule->getDataLayout();
+            GVAlign = GV->getPointerAlignment(DL);
 
             // If the function alignment is not specified then assume that it
             // is 4.
@@ -1221,7 +1222,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
             // increased code size (see https://reviews.llvm.org/D55115)
             // FIXME: This code should be deleted once existing targets have
             // appropriate defaults
-            if (!GVAlign && isa<Function>(GV))
+            if (isa<Function>(GV) && !DL.getFunctionPtrAlign())
               GVAlign = Align(4);
           } else if (isa<Function>(GV)) {
             // Without a datalayout we have to assume the worst case: that the

diff  --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
index 665527094933..70d401295709 100644
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -738,16 +738,16 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
   return DerefBytes;
 }
 
-MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
+Align Value::getPointerAlignment(const DataLayout &DL) const {
   assert(getType()->isPointerTy() && "must be pointer");
   if (auto *GO = dyn_cast<GlobalObject>(this)) {
     if (isa<Function>(GO)) {
-      const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
       switch (DL.getFunctionPtrAlignType()) {
       case DataLayout::FunctionPtrAlignType::Independent:
         return FunctionPtrAlign;
       case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
-        return std::max(FunctionPtrAlign, MaybeAlign(GO->getAlignment()));
+        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
       }
       llvm_unreachable("Unhandled FunctionPtrAlignType");
     }
@@ -760,13 +760,13 @@ MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
           // it the preferred alignment. Otherwise, we have to assume that it
           // may only have the minimum ABI alignment.
           if (GVar->isStrongDefinitionForLinker())
-            return MaybeAlign(DL.getPreferredAlignment(GVar));
+            return Align(DL.getPreferredAlignment(GVar));
           else
             return DL.getABITypeAlign(ObjectType);
         }
       }
     }
-    return Alignment;
+    return Alignment.valueOrOne();
   } else if (const Argument *A = dyn_cast<Argument>(this)) {
     const MaybeAlign Alignment = A->getParamAlign();
     if (!Alignment && A->hasStructRetAttr()) {
@@ -775,25 +775,18 @@ MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
       if (EltTy->isSized())
         return DL.getABITypeAlign(EltTy);
     }
-    return Alignment;
+    return Alignment.valueOrOne();
   } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
-    const MaybeAlign Alignment = AI->getAlign();
-    if (!Alignment) {
-      Type *AllocatedType = AI->getAllocatedType();
-      if (AllocatedType->isSized())
-        return MaybeAlign(DL.getPrefTypeAlignment(AllocatedType));
-    }
-    return Alignment;
+    return AI->getAlign();
   } else if (const auto *Call = dyn_cast<CallBase>(this)) {
-    const MaybeAlign Alignment = Call->getRetAlign();
+    MaybeAlign Alignment = Call->getRetAlign();
     if (!Alignment && Call->getCalledFunction())
-      return MaybeAlign(
-          Call->getCalledFunction()->getAttributes().getRetAlignment());
-    return Alignment;
+      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
+    return Alignment.valueOrOne();
   } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
     if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
       ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-      return MaybeAlign(CI->getLimitedValue());
+      return Align(CI->getLimitedValue());
     }
   } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
     if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
@@ -807,7 +800,7 @@ MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
                        : Value::MaximumAlignment);
     }
   }
-  return llvm::None;
+  return Align(1);
 }
 
 const Value *Value::DoPHITranslation(const BasicBlock *CurBB,

diff  --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 458bff9e7af5..03ac81e2462b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2485,8 +2485,8 @@ def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
 def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
   if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
     const DataLayout &DL = MF->getDataLayout();
-    MaybeAlign Align = G->getGlobal()->getPointerAlignment(DL);
-    return Align && *Align >= 4 && G->getOffset() % 4 == 0;
+    Align Align = G->getGlobal()->getPointerAlignment(DL);
+    return Align >= 4 && G->getOffset() % 4 == 0;
   }
   if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
     return C->getAlign() >= 4 && C->getOffset() % 4 == 0;

diff  --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 0117a73003b1..ff7b23a00d5e 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -3523,17 +3523,6 @@ struct AADereferenceableCallSiteReturned final
 
 // ------------------------ Align Argument Attribute ------------------------
 
-/// \p Ptr is accessed so we can get alignment information if the ABI requires
-/// the element type to be aligned.
-static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
-                                                   const DataLayout &DL) {
-  MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
-  Type *ElementTy = Ptr->getType()->getPointerElementType();
-  if (ElementTy->isSized())
-    KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
-  return KnownAlignment;
-}
-
 static unsigned getKnownAlignForUse(Attributor &A,
                                     AbstractAttribute &QueryingAA,
                                     Value &AssociatedValue, const Use *U,
@@ -3569,19 +3558,11 @@ static unsigned getKnownAlignForUse(Attributor &A,
   const DataLayout &DL = A.getDataLayout();
   const Value *UseV = U->get();
   if (auto *SI = dyn_cast<StoreInst>(I)) {
-    if (SI->getPointerOperand() == UseV) {
-      if (unsigned SIAlign = SI->getAlignment())
-        MA = MaybeAlign(SIAlign);
-      else
-        MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
-    }
+    if (SI->getPointerOperand() == UseV)
+      MA = SI->getAlign();
   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
-    if (LI->getPointerOperand() == UseV) {
-      if (unsigned LIAlign = LI->getAlignment())
-        MA = MaybeAlign(LIAlign);
-      else
-        MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
-    }
+    if (LI->getPointerOperand() == UseV)
+      MA = LI->getAlign();
   }
 
   if (!MA.hasValue() || MA <= 1)
@@ -3622,8 +3603,7 @@ struct AAAlignImpl : AAAlign {
     //       their uses and int2ptr is not handled. It is not a correctness
     //       problem though!
     if (!V.getType()->getPointerElementType()->isFunctionTy())
-      takeKnownMaximum(
-          V.getPointerAlignment(A.getDataLayout()).valueOrOne().value());
+      takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
 
     if (getIRPosition().isFnInterfaceKind() &&
         (!getAnchorScope() ||
@@ -3664,9 +3644,9 @@ struct AAAlignImpl : AAAlign {
 
     ChangeStatus Changed = AAAlign::manifest(A);
 
-    MaybeAlign InheritAlign =
+    Align InheritAlign =
         getAssociatedValue().getPointerAlignment(A.getDataLayout());
-    if (InheritAlign.valueOrOne() >= getAssumedAlign())
+    if (InheritAlign >= getAssumedAlign())
       return LoadStoreChanged;
     return Changed | LoadStoreChanged;
   }
@@ -3717,8 +3697,8 @@ struct AAAlignFloating : AAAlignImpl {
       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
       if (!Stripped && this == &AA) {
         // Use only IR information if we did not strip anything.
-        const MaybeAlign PA = V.getPointerAlignment(DL);
-        T.takeKnownMaximum(PA ? PA->value() : 0);
+        Align PA = V.getPointerAlignment(DL);
+        T.takeKnownMaximum(PA.value());
         T.indicatePessimisticFixpoint();
       } else {
         // Use abstract attribute information.
@@ -3786,9 +3766,9 @@ struct AAAlignCallSiteArgument final : AAAlignFloating {
       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
         return ChangeStatus::UNCHANGED;
     ChangeStatus Changed = AAAlignImpl::manifest(A);
-    MaybeAlign InheritAlign =
+    Align InheritAlign =
         getAssociatedValue().getPointerAlignment(A.getDataLayout());
-    if (InheritAlign.valueOrOne() >= getAssumedAlign())
+    if (InheritAlign >= getAssumedAlign())
       Changed = ChangeStatus::UNCHANGED;
     return Changed;
   }

diff  --git a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
index 80bc72d0d5bc..7423378d031a 100644
--- a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
+++ b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
@@ -51,10 +51,10 @@ entry:
     %sret_gep_outside = getelementptr %struct.A, %struct.A* %result, i64 0, i32 1, i64 7
     load i8, i8* %sret_gep_outside
 
-; CHECK: %dparam{{.*}}(aligned)
+; CHECK: %dparam{{.*}}(unaligned)
     %load3 = load i32, i32 addrspace(1)* %dparam
 
-; CHECK: %relocate{{.*}}(aligned)
+; CHECK: %relocate{{.*}}(unaligned)
     %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
     %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 7, i32 7)
     %load4 = load i32, i32 addrspace(1)* %relocate
@@ -70,7 +70,7 @@ entry:
     %load6 = load i32, i32* %nd_load
 
     ; Load from a dereferenceable load
-; CHECK: %d4_load{{.*}}(aligned)
+; CHECK: %d4_load{{.*}}(unaligned)
     %d4_load = load i32*, i32** @globali32ptr, !dereferenceable !0
     %load7 = load i32, i32* %d4_load
 
@@ -85,7 +85,7 @@ entry:
     %load9 = load i32, i32* %d_or_null_load
 
     ; Load from a non-null pointer with dereferenceable_or_null
-; CHECK: %d_or_null_non_null_load{{.*}}(aligned)
+; CHECK: %d_or_null_non_null_load{{.*}}(unaligned)
     %d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
     %load10 = load i32, i32* %d_or_null_non_null_load
 

diff  --git a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
index 6c54453fad8f..ff1d94cb0ba5 100644
--- a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
@@ -192,7 +192,7 @@ header:
   br label %header
 }
 
-define i32 @test6b(i1 %cnd, i32* dereferenceable(8) %p) {
+define i32 @test6b(i1 %cnd, i32* dereferenceable(8) align 4 %p) {
 entry: 
 ; CHECK-LABEL: @test6b
 ; CHECK: load i32, i32* %p

diff  --git a/llvm/test/Transforms/GVN/PRE/pre-load.ll b/llvm/test/Transforms/GVN/PRE/pre-load.ll
index cfa6b5e5fec9..45a93109e695 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load.ll
@@ -507,7 +507,7 @@ follow_2:
 ; dereferenceable can be loaded from speculatively without a risk of trapping.
 ; Since it is OK to speculate, PRE is allowed.
 
-define i32 @test15(i32* noalias nocapture readonly dereferenceable(8) %x, i32* noalias nocapture %r, i32 %a) {
+define i32 @test15(i32* noalias nocapture readonly dereferenceable(8) align 4 %x, i32* noalias nocapture %r, i32 %a) {
 
 ; CHECK-LABEL: @test15
 ; CHECK: entry:
@@ -548,7 +548,7 @@ if.end:
 ; dereferenceable can be loaded from speculatively without a risk of trapping.
 ; Since it is OK to speculate, PRE is allowed.
 
-define i32 @test16(i32* noalias nocapture readonly dereferenceable(8) %x, i32* noalias nocapture %r, i32 %a) {
+define i32 @test16(i32* noalias nocapture readonly dereferenceable(8) align 4 %x, i32* noalias nocapture %r, i32 %a) {
 
 ; CHECK-LABEL: @test16(
 ; CHECK: entry:

diff  --git a/llvm/test/Transforms/InstCombine/call-guard.ll b/llvm/test/Transforms/InstCombine/call-guard.ll
index 241b0788ad56..3d61a5a50d31 100644
--- a/llvm/test/Transforms/InstCombine/call-guard.ll
+++ b/llvm/test/Transforms/InstCombine/call-guard.ll
@@ -67,7 +67,7 @@ define void @negative_load(i32 %V1, i32* %P) {
   ret void
 }
 
-define void @deref_load(i32 %V1, i32* dereferenceable(4) %P) {
+define void @deref_load(i32 %V1, i32* dereferenceable(4) align 4 %P) {
 ; CHECK-LABEL: @deref_load
 ; CHECK-NEXT:  %V2 = load i32, i32* %P, align 4
 ; CHECK-NEXT:  %1 = and i32 %V2, %V1

diff  --git a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
index 6cae50d2c1a8..24bf6dd6c522 100644
--- a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
@@ -83,7 +83,7 @@ define <2 x double> @load_generic(<2 x double>* %ptr, double %pt,
   ret <2 x double> %res
 }
 
-define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) %ptr,
+define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) align 4 %ptr,
 ; CHECK-LABEL: @load_speculative(
 ; CHECK-NEXT:    [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
 ; CHECK-NEXT:    [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> undef, <2 x i32> zeroinitializer

diff  --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index bbed168265dc..5a0b0cae48d5 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -981,7 +981,7 @@ entry:
 
 ; Test that we can speculate the loads around the select even when we can't
 ; fold the load completely away.
-define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) %x, i32* dereferenceable(4) %y, i32* %z) {
+define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) align 4 %x, i32* dereferenceable(4) align 4 %y, i32* %z) {
 ; CHECK-LABEL: @test78_deref(
 ; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, i32* [[X:%.*]], align 4
 ; CHECK-NEXT:    [[Y_VAL:%.*]] = load i32, i32* [[Y:%.*]], align 4

diff  --git a/llvm/test/Transforms/LICM/hoist-deref-load.ll b/llvm/test/Transforms/LICM/hoist-deref-load.ll
index 4570da5a46b1..dd4d8821955d 100644
--- a/llvm/test/Transforms/LICM/hoist-deref-load.ll
+++ b/llvm/test/Transforms/LICM/hoist-deref-load.ll
@@ -19,7 +19,7 @@ target triple = "x86_64-unknown-linux-gnu"
 ; CHECK: load i32, i32* %c, align 4
 ; CHECK: for.body:
 
-define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) %c, i32 %n) #0 {
+define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) align 4 %c, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -99,7 +99,7 @@ for.end:                                          ; preds = %for.inc, %entry
 ; CHECK: load i32, i32* %c2, align 4
 ; CHECK: for.body:
 
-define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) %c, i32 %n) #0 {
+define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) align 4 %c, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -183,7 +183,7 @@ for.end:                                          ; preds = %for.inc, %entry
 ; CHECK: load i32, i32* %c, align 4
 ; CHECK: for.body:
 
-define void @test5(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n) #0 {
+define void @test5(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n) #0 {
 entry:
   %not_null = icmp ne i32* %c, null
   br i1 %not_null, label %not.null, label %for.end
@@ -274,7 +274,7 @@ for.end:                                          ; preds = %for.inc, %entry
 
 define void @test7(i32* noalias %a, i32* %b, i32** %cptr, i32 %n) #0 {
 entry:
-  %c = load i32*, i32** %cptr, !dereferenceable !0
+  %c = load i32*, i32** %cptr, !dereferenceable !0, !align !{i64 4}
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
 
@@ -321,7 +321,7 @@ for.end:                                          ; preds = %for.inc, %entry
 
 define void @test8(i32* noalias %a, i32* %b, i32** %cptr, i32 %n) #0 {
 entry:
-  %c = load i32*, i32** %cptr, !dereferenceable_or_null !0
+  %c = load i32*, i32** %cptr, !dereferenceable_or_null !0, !align !{i64 4}
   %not_null = icmp ne i32* %c, null
   br i1 %not_null, label %not.null, label %for.end
 
@@ -405,7 +405,7 @@ for.end:                                          ; preds = %for.inc, %entry
 ; CHECK: if.then:
 ; CHECK: load i32, i32* %c, align 4
 
-define void @test10(i32* noalias %a, i32* %b, i32** dereferenceable(8) %cptr, i32 %n) #0 {
+define void @test10(i32* noalias %a, i32* %b, i32** dereferenceable(8) align 8 %cptr, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -475,7 +475,7 @@ for.end:                                          ; preds = %for.inc, %entry
 
 declare void @llvm.experimental.guard(i1, ...)
 
-define void @test12(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n) #0 {
+define void @test12(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n) #0 {
 ; Prove non-null ness of %c via a guard, not a branch.
 
 ; CHECK-LABEL: @test12(
@@ -560,7 +560,7 @@ for.end:                                          ; preds = %for.inc, %entry, %e
 
 ; Check that branch by condition "null check AND something" allows to hoist the
 ; load.
-define void @test14(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n, i1 %dummy_cond) #0 {
+define void @test14(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n, i1 %dummy_cond) #0 {
 
 ; CHECK-LABEL: @test14
 ; CHECK: load i32, i32* %c, align 4
@@ -602,7 +602,7 @@ for.end:                                          ; preds = %for.inc, %entry, %n
 
 ; Check that guard by condition "null check AND something" allows to hoist the
 ; load.
-define void @test15(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n, i1 %dummy_cond) #0 {
+define void @test15(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n, i1 %dummy_cond) #0 {
 
 ; CHECK-LABEL: @test15
 ; CHECK: load i32, i32* %c, align 4

diff  --git a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
index b33fac7b36c2..f07a5a40a90b 100644
--- a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
+++ b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
@@ -119,7 +119,7 @@ end:
   ret i8* %x10
 }
 
-define i32* @test5(i32 %a, i32 %b, i32 %c, i32* dereferenceable(10) %ptr1, i32* dereferenceable(10) %ptr2, i32** dereferenceable(10) %ptr3) {
+define i32* @test5(i32 %a, i32 %b, i32 %c, i32* dereferenceable(10) %ptr1, i32* dereferenceable(10) %ptr2, i32** dereferenceable(10) align 8 %ptr3) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[T1:%.*]] = icmp eq i32 [[B:%.*]], 0

diff  --git a/llvm/test/Transforms/TailCallElim/reorder_load.ll b/llvm/test/Transforms/TailCallElim/reorder_load.ll
index c0a6ea00f71b..027cfe78bb4b 100644
--- a/llvm/test/Transforms/TailCallElim/reorder_load.ll
+++ b/llvm/test/Transforms/TailCallElim/reorder_load.ll
@@ -126,7 +126,7 @@ recurse:		; preds = %else
 
 ; This load can be moved above the call because the function won't write to it
 ; and the a_arg is dereferenceable.
-define fastcc i32 @raise_load_5(i32* dereferenceable(4) %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
+define fastcc i32 @raise_load_5(i32* dereferenceable(4) align 4 %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
 ; CHECK-LABEL: @raise_load_5(
 ; CHECK-NOT: call
 ; CHECK: load i32, i32*

diff  --git a/llvm/unittests/IR/FunctionTest.cpp b/llvm/unittests/IR/FunctionTest.cpp
index c0744b08605b..0ecba35c329f 100644
--- a/llvm/unittests/IR/FunctionTest.cpp
+++ b/llvm/unittests/IR/FunctionTest.cpp
@@ -143,7 +143,7 @@ TEST(FunctionTest, GetPointerAlignment) {
   FunctionType *FuncType(FunctionType::get(VoidType, false));
   std::unique_ptr<Function> Func(Function::Create(
       FuncType, GlobalValue::ExternalLinkage));
-  EXPECT_EQ(MaybeAlign(), Func->getPointerAlignment(DataLayout("")));
+  EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fi8")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fn8")));
   EXPECT_EQ(Align(2), Func->getPointerAlignment(DataLayout("Fi16")));
@@ -153,7 +153,7 @@ TEST(FunctionTest, GetPointerAlignment) {
 
   Func->setAlignment(Align(4));
 
-  EXPECT_EQ(MaybeAlign(), Func->getPointerAlignment(DataLayout("")));
+  EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fi8")));
   EXPECT_EQ(Align(4), Func->getPointerAlignment(DataLayout("Fn8")));
   EXPECT_EQ(Align(2), Func->getPointerAlignment(DataLayout("Fi16")));

diff  --git a/polly/test/ScopInfo/invariant_load_dereferenceable.ll b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
index 420bbfed2544..dbc56a12c81d 100644
--- a/polly/test/ScopInfo/invariant_load_dereferenceable.ll
+++ b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
@@ -17,7 +17,7 @@
 
 ; CHECK-NOT: Function: foo_undereferanceable
 
-define void @foo_dereferanceable(double* %A, double* %B, i64* dereferenceable(8) %sizeA_ptr,
+define void @foo_dereferanceable(double* %A, double* %B, i64* dereferenceable(8) align 8 %sizeA_ptr,
 		i32 %lb.i, i32 %lb.j, i32 %ub.i, i32 %ub.j) {
 entry:
 	br label %for.i


        

