[llvm] 0fc624f - [IR] Return AAMDNodes from Instruction::getMetadata() (NFC)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Thu Sep 16 12:07:06 PDT 2021
Author: Nikita Popov
Date: 2021-09-16T21:06:57+02:00
New Revision: 0fc624f029f568e91caf74d90abc5d8d971151c2
URL: https://github.com/llvm/llvm-project/commit/0fc624f029f568e91caf74d90abc5d8d971151c2
DIFF: https://github.com/llvm/llvm-project/commit/0fc624f029f568e91caf74d90abc5d8d971151c2.diff
LOG: [IR] Return AAMDNodes from Instruction::getMetadata() (NFC)
getAAMetadata() currently uses a weird API where it populates a
structure passed to it, and optionally merges into it. Instead,
we can return the AAMDNodes and provide a separate merge() API.
This makes usages more compact.
Differential Revision: https://reviews.llvm.org/D109852
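To illustrate the usage change, here is a sketch distilled from the
InstCombine hunk below (the variable names AATags, SI, OtherStore and
NewSI are taken from that hunk and stand in for whatever instructions
a caller reads metadata from):

  // Old API: populate a caller-provided structure, then merge in place.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // New API: the metadata is returned directly, and merging is an
  // explicit, separate operation on AAMDNodes.
  AAMDNodes AATags = SI.getAAMetadata();
  if (AATags)
    NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));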
Added:
Modified:
llvm/include/llvm/IR/Instruction.h
llvm/include/llvm/IR/Metadata.h
llvm/lib/Analysis/MemoryLocation.cpp
llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
llvm/lib/Transforms/IPO/FunctionAttrs.cpp
llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
llvm/lib/Transforms/Scalar/GVN.cpp
llvm/lib/Transforms/Scalar/JumpThreading.cpp
llvm/lib/Transforms/Scalar/LICM.cpp
llvm/lib/Transforms/Scalar/SROA.cpp
llvm/lib/Transforms/Utils/SimplifyCFG.cpp
polly/lib/Analysis/ScopDetection.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 57c34f37db6aa..15a4048090d07 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -307,10 +307,8 @@ class Instruction : public User,
Value::getAllMetadata(MDs);
}
- /// Fills the AAMDNodes structure with AA metadata from this instruction.
- /// When Merge is true, the existing AA metadata is merged with that from this
- /// instruction providing the most-general result.
- void getAAMetadata(AAMDNodes &N, bool Merge = false) const;
+ /// Returns the AA metadata for this instruction.
+ AAMDNodes getAAMetadata() const;
/// Set the metadata of the specified kind to the specified node. This updates
/// or replaces metadata if already present, or removes it if Node is null.
diff --git a/llvm/include/llvm/IR/Metadata.h b/llvm/include/llvm/IR/Metadata.h
index 17a9c3a77f4e4..ad0f5c390670a 100644
--- a/llvm/include/llvm/IR/Metadata.h
+++ b/llvm/include/llvm/IR/Metadata.h
@@ -707,6 +707,10 @@ struct AAMDNodes {
Result.NoAlias = NoAlias;
return Result;
}
+
+ /// Given two sets of AAMDNodes applying to potentially different locations,
+ /// determine the best AAMDNodes that apply to both.
+ AAMDNodes merge(const AAMDNodes &Other) const;
};
// Specialize DenseMapInfo for AAMDNodes.
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index ef9cda37ce357..7f2d04c495658 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -35,54 +35,44 @@ void LocationSize::print(raw_ostream &OS) const {
}
MemoryLocation MemoryLocation::get(const LoadInst *LI) {
- AAMDNodes AATags;
- LI->getAAMetadata(AATags);
const auto &DL = LI->getModule()->getDataLayout();
return MemoryLocation(
LI->getPointerOperand(),
- LocationSize::precise(DL.getTypeStoreSize(LI->getType())), AATags);
+ LocationSize::precise(DL.getTypeStoreSize(LI->getType())),
+ LI->getAAMetadata());
}
MemoryLocation MemoryLocation::get(const StoreInst *SI) {
- AAMDNodes AATags;
- SI->getAAMetadata(AATags);
const auto &DL = SI->getModule()->getDataLayout();
return MemoryLocation(SI->getPointerOperand(),
LocationSize::precise(DL.getTypeStoreSize(
SI->getValueOperand()->getType())),
- AATags);
+ SI->getAAMetadata());
}
MemoryLocation MemoryLocation::get(const VAArgInst *VI) {
- AAMDNodes AATags;
- VI->getAAMetadata(AATags);
-
return MemoryLocation(VI->getPointerOperand(),
- LocationSize::afterPointer(), AATags);
+ LocationSize::afterPointer(), VI->getAAMetadata());
}
MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
- AAMDNodes AATags;
- CXI->getAAMetadata(AATags);
const auto &DL = CXI->getModule()->getDataLayout();
return MemoryLocation(CXI->getPointerOperand(),
LocationSize::precise(DL.getTypeStoreSize(
CXI->getCompareOperand()->getType())),
- AATags);
+ CXI->getAAMetadata());
}
MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
- AAMDNodes AATags;
- RMWI->getAAMetadata(AATags);
const auto &DL = RMWI->getModule()->getDataLayout();
return MemoryLocation(RMWI->getPointerOperand(),
LocationSize::precise(DL.getTypeStoreSize(
RMWI->getValOperand()->getType())),
- AATags);
+ RMWI->getAAMetadata());
}
Optional<MemoryLocation> MemoryLocation::getOrNone(const Instruction *Inst) {
@@ -117,10 +107,7 @@ MemoryLocation MemoryLocation::getForSource(const AnyMemTransferInst *MTI) {
// memcpy/memmove can have AA tags. For memcpy, they apply
// to both the source and the destination.
- AAMDNodes AATags;
- MTI->getAAMetadata(AATags);
-
- return MemoryLocation(MTI->getRawSource(), Size, AATags);
+ return MemoryLocation(MTI->getRawSource(), Size, MTI->getAAMetadata());
}
MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
@@ -138,17 +125,13 @@ MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
// memcpy/memmove can have AA tags. For memcpy, they apply
// to both the source and the destination.
- AAMDNodes AATags;
- MI->getAAMetadata(AATags);
-
- return MemoryLocation(MI->getRawDest(), Size, AATags);
+ return MemoryLocation(MI->getRawDest(), Size, MI->getAAMetadata());
}
MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
unsigned ArgIdx,
const TargetLibraryInfo *TLI) {
- AAMDNodes AATags;
- Call->getAAMetadata(AATags);
+ AAMDNodes AATags = Call->getAAMetadata();
const Value *Arg = Call->getArgOperand(ArgIdx);
// We may be able to produce an exact size for known intrinsics.
diff --git a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 20d718f4fad34..32a5f9df777c4 100644
--- a/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -521,21 +521,22 @@ static const MDNode *getLeastCommonType(const MDNode *A, const MDNode *B) {
return Ret;
}
-void Instruction::getAAMetadata(AAMDNodes &N, bool Merge) const {
- if (Merge) {
- N.TBAA =
- MDNode::getMostGenericTBAA(N.TBAA, getMetadata(LLVMContext::MD_tbaa));
- N.TBAAStruct = nullptr;
- N.Scope = MDNode::getMostGenericAliasScope(
- N.Scope, getMetadata(LLVMContext::MD_alias_scope));
- N.NoAlias =
- MDNode::intersect(N.NoAlias, getMetadata(LLVMContext::MD_noalias));
- } else {
- N.TBAA = getMetadata(LLVMContext::MD_tbaa);
- N.TBAAStruct = getMetadata(LLVMContext::MD_tbaa_struct);
- N.Scope = getMetadata(LLVMContext::MD_alias_scope);
- N.NoAlias = getMetadata(LLVMContext::MD_noalias);
- }
+AAMDNodes AAMDNodes::merge(const AAMDNodes &Other) const {
+ AAMDNodes Result;
+ Result.TBAA = MDNode::getMostGenericTBAA(TBAA, Other.TBAA);
+ Result.TBAAStruct = nullptr;
+ Result.Scope = MDNode::getMostGenericAliasScope(Scope, Other.Scope);
+ Result.NoAlias = MDNode::intersect(NoAlias, Other.NoAlias);
+ return Result;
+}
+
+AAMDNodes Instruction::getAAMetadata() const {
+ AAMDNodes Result;
+ Result.TBAA = getMetadata(LLVMContext::MD_tbaa);
+ Result.TBAAStruct = getMetadata(LLVMContext::MD_tbaa_struct);
+ Result.Scope = getMetadata(LLVMContext::MD_alias_scope);
+ Result.NoAlias = getMetadata(LLVMContext::MD_noalias);
+ return Result;
}
static const MDNode *createAccessTag(const MDNode *AccessType) {
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 2000a7f8752ef..34b1b3faec895 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1295,11 +1295,9 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
Align BaseAlign = getMemOpAlign(LI);
- AAMDNodes AAMetadata;
- LI.getAAMetadata(AAMetadata);
auto MMO = MF->getMachineMemOperand(
Ptr, Flags, MRI->getType(Regs[i]),
- commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
+ commonAlignment(BaseAlign, Offsets[i] / 8), LI.getAAMetadata(), Ranges,
LI.getSyncScopeID(), LI.getOrdering());
MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
}
@@ -1337,11 +1335,9 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
Align BaseAlign = getMemOpAlign(SI);
- AAMDNodes AAMetadata;
- SI.getAAMetadata(AAMetadata);
auto MMO = MF->getMachineMemOperand(
Ptr, Flags, MRI->getType(Vals[i]),
- commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
+ commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
SI.getSyncScopeID(), SI.getOrdering());
MIRBuilder.buildStore(Vals[i], Addr, *MMO);
}
@@ -2768,14 +2764,11 @@ bool IRTranslator::translateAtomicCmpXchg(const User &U,
Register Cmp = getOrCreateVReg(*I.getCompareOperand());
Register NewVal = getOrCreateVReg(*I.getNewValOperand());
- AAMDNodes AAMetadata;
- I.getAAMetadata(AAMetadata);
-
MIRBuilder.buildAtomicCmpXchgWithSuccess(
OldValRes, SuccessRes, Addr, Cmp, NewVal,
*MF->getMachineMemOperand(
MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
- getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(),
+ getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
I.getSuccessOrdering(), I.getFailureOrdering()));
return true;
}
@@ -2835,14 +2828,11 @@ bool IRTranslator::translateAtomicRMW(const User &U,
break;
}
- AAMDNodes AAMetadata;
- I.getAAMetadata(AAMetadata);
-
MIRBuilder.buildAtomicRMW(
Opcode, Res, Addr, Val,
*MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
Flags, MRI->getType(Val), getMemOpAlign(I),
- AAMetadata, nullptr, I.getSyncScopeID(),
+ I.getAAMetadata(), nullptr, I.getSyncScopeID(),
I.getOrdering()));
return true;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 084bcac0b8679..251e6989b99ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -2318,8 +2318,7 @@ FastISel::createMachineMemOperandFor(const Instruction *I) const {
bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
- AAMDNodes AAInfo;
- I->getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = I->getAAMetadata();
if (!Alignment) // Ensure that codegen never sees alignment 0.
Alignment = DL.getABITypeAlign(ValTy);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 27ceae49081db..7bb3031d6f8ed 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4063,8 +4063,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Type *Ty = I.getType();
Align Alignment = I.getAlign();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = I.getAAMetadata();
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SmallVector<EVT, 4> ValueVTs, MemVTs;
@@ -4191,13 +4190,11 @@ void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
const Value *SV = I.getOperand(0);
Type *Ty = I.getType();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
assert(
(!AA ||
!AA->pointsToConstantMemory(MemoryLocation(
SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
- AAInfo))) &&
+ I.getAAMetadata()))) &&
"load_from_swift_error should not be constant memory");
SmallVector<EVT, 4> ValueVTs;
@@ -4255,8 +4252,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
SDLoc dl = getCurSDLoc();
Align Alignment = I.getAlign();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = I.getAAMetadata();
auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
@@ -4327,14 +4323,11 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
if (!Alignment)
Alignment = DAG.getEVTAlign(VT);
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
-
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
- VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
+ VT.getStoreSize().getKnownMinSize(), *Alignment, I.getAAMetadata());
SDValue StoreNode =
DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
ISD::UNINDEXED, false /* Truncating */, IsCompressing);
@@ -4418,9 +4411,6 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
.getValueOr(DAG.getEVTAlign(VT.getScalarType()));
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
-
SDValue Base;
SDValue Index;
ISD::MemIndexType IndexType;
@@ -4433,7 +4423,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
MachinePointerInfo(AS), MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
- MemoryLocation::UnknownSize, Alignment, AAInfo);
+ MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
@@ -4491,8 +4481,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
if (!Alignment)
Alignment = DAG.getEVTAlign(VT);
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = I.getAAMetadata();
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
// Do not serialize masked loads of constant memory with anything.
@@ -4535,8 +4524,6 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
->getMaybeAlignValue()
.getValueOr(DAG.getEVTAlign(VT.getScalarType()));
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue Root = DAG.getRoot();
@@ -4551,7 +4538,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
MachinePointerInfo(AS), MachineMemOperand::MOLoad,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
- MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
+ MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -4829,12 +4816,11 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
SDValue Result;
if (IsTgtIntrinsic) {
// This is target intrinsic that touches memory
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
Result =
DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
MachinePointerInfo(Info.ptrVal, Info.offset),
- Info.align, Info.flags, Info.size, AAInfo);
+ Info.align, Info.flags, Info.size,
+ I.getAAMetadata());
} else if (!HasChain) {
Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
} else if (!I.getType()->isVoidTy()) {
@@ -5872,12 +5858,11 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// FIXME: Support passing different dest/src alignments to the memcpy DAG
// node.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
/* AlwaysInline */ false, isTC,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)), AAInfo);
+ MachinePointerInfo(I.getArgOperand(1)),
+ I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
@@ -5895,12 +5880,11 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
// FIXME: Support passing different dest/src alignments to the memcpy DAG
// node.
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
/* AlwaysInline */ true, isTC,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)), AAInfo);
+ MachinePointerInfo(I.getArgOperand(1)),
+ I.getAAMetadata());
updateDAGForMaybeTailCall(MC);
return;
}
@@ -5914,10 +5898,9 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isVol = MSI.isVolatile();
bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
- MachinePointerInfo(I.getArgOperand(0)), AAInfo);
+ MachinePointerInfo(I.getArgOperand(0)),
+ I.getAAMetadata());
updateDAGForMaybeTailCall(MS);
return;
}
@@ -5935,11 +5918,10 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
// FIXME: Support passing different dest/src alignments to the memmove DAG
// node.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)), AAInfo);
+ MachinePointerInfo(I.getArgOperand(1)),
+ I.getAAMetadata());
updateDAGForMaybeTailCall(MM);
return;
}
@@ -7348,8 +7330,7 @@ void SelectionDAGBuilder::visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
Value *PtrOperand = VPIntrin.getArgOperand(0);
MaybeAlign Alignment = DAG.getEVTAlign(VT);
- AAMDNodes AAInfo;
- VPIntrin.getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = VPIntrin.getAAMetadata();
const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
SDValue LD;
bool AddToChain = true;
@@ -7413,8 +7394,7 @@ void SelectionDAGBuilder::visitVPStoreScatter(const VPIntrinsic &VPIntrin,
Value *PtrOperand = VPIntrin.getArgOperand(1);
EVT VT = OpValues[0].getValueType();
MaybeAlign Alignment = DAG.getEVTAlign(VT);
- AAMDNodes AAInfo;
- VPIntrin.getAAMetadata(AAInfo);
+ AAMDNodes AAInfo = VPIntrin.getAAMetadata();
SDValue ST;
if (!isScatter) {
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
@@ -7899,12 +7879,11 @@ bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
// because the return pointer needs to be adjusted by the size of
// the copied memory.
SDValue Root = isVol ? getRoot() : getMemoryRoot();
- AAMDNodes AAInfo;
- I.getAAMetadata(AAInfo);
SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
/*isTailCall=*/false,
MachinePointerInfo(I.getArgOperand(0)),
- MachinePointerInfo(I.getArgOperand(1)), AAInfo);
+ MachinePointerInfo(I.getArgOperand(1)),
+ I.getAAMetadata());
assert(MC.getNode() != nullptr &&
"** memcpy should not be lowered as TailCall in mempcpy context **");
DAG.setRoot(MC);
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 5d6b750d9a460..dd31073d1343a 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -313,9 +313,7 @@ doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
newLoad->setAlignment(OrigLoad->getAlign());
// Transfer the AA info too.
- AAMDNodes AAInfo;
- OrigLoad->getAAMetadata(AAInfo);
- newLoad->setAAMetadata(AAInfo);
+ newLoad->setAAMetadata(OrigLoad->getAAMetadata());
Args.push_back(newLoad);
ArgAttrVec.push_back(AttributeSet());
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 6814dca489fc5..95c9ed9e304f0 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -175,9 +175,8 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
if (!Arg->getType()->isPtrOrPtrVectorTy())
continue;
- AAMDNodes AAInfo;
- I->getAAMetadata(AAInfo);
- MemoryLocation Loc = MemoryLocation::getBeforeOrAfter(Arg, AAInfo);
+ MemoryLocation Loc =
+ MemoryLocation::getBeforeOrAfter(Arg, I->getAAMetadata());
// Skip accesses to local or constant memory as they don't impact the
// externally visible mod/ref behavior.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 80abc775299a5..213b3b86a9c03 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -337,8 +337,7 @@ void PointerReplacer::replace(Instruction *I) {
MemCpy->getIntrinsicID(), MemCpy->getRawDest(), MemCpy->getDestAlign(),
SrcV, MemCpy->getSourceAlign(), MemCpy->getLength(),
MemCpy->isVolatile());
- AAMDNodes AAMD;
- MemCpy->getAAMetadata(AAMD);
+ AAMDNodes AAMD = MemCpy->getAAMetadata();
if (AAMD)
NewI->setAAMetadata(AAMD);
@@ -649,9 +648,7 @@ static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
if (NumElements == 1) {
LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
".unpack");
- AAMDNodes AAMD;
- LI.getAAMetadata(AAMD);
- NewLoad->setAAMetadata(AAMD);
+ NewLoad->setAAMetadata(LI.getAAMetadata());
return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name));
}
@@ -680,9 +677,7 @@ static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
ST->getElementType(i), Ptr,
commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load.
- AAMDNodes AAMD;
- LI.getAAMetadata(AAMD);
- L->setAAMetadata(AAMD);
+ L->setAAMetadata(LI.getAAMetadata());
V = IC.Builder.CreateInsertValue(V, L, i);
}
@@ -695,9 +690,7 @@ static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
auto NumElements = AT->getNumElements();
if (NumElements == 1) {
LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
- AAMDNodes AAMD;
- LI.getAAMetadata(AAMD);
- NewLoad->setAAMetadata(AAMD);
+ NewLoad->setAAMetadata(LI.getAAMetadata());
return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name));
}
@@ -729,9 +722,7 @@ static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
commonAlignment(Align, Offset),
Name + ".unpack");
- AAMDNodes AAMD;
- LI.getAAMetadata(AAMD);
- L->setAAMetadata(AAMD);
+ L->setAAMetadata(LI.getAAMetadata());
V = IC.Builder.CreateInsertValue(V, L, i);
Offset += EltSize;
}
@@ -1208,9 +1199,7 @@ static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
- AAMDNodes AAMD;
- SI.getAAMetadata(AAMD);
- NS->setAAMetadata(AAMD);
+ NS->setAAMetadata(SI.getAAMetadata());
}
return true;
@@ -1256,9 +1245,7 @@ static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = commonAlignment(Align, Offset);
Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
- AAMDNodes AAMD;
- SI.getAAMetadata(AAMD);
- NS->setAAMetadata(AAMD);
+ NS->setAAMetadata(SI.getAAMetadata());
Offset += EltSize;
}
@@ -1569,12 +1556,9 @@ bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
NewSI->setDebugLoc(MergedLoc);
// If the two stores had AA tags, merge them.
- AAMDNodes AATags;
- SI.getAAMetadata(AATags);
- if (AATags) {
- OtherStore->getAAMetadata(AATags, /* Merge = */ true);
- NewSI->setAAMetadata(AATags);
- }
+ AAMDNodes AATags = SI.getAAMetadata();
+ if (AATags)
+ NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
// Nuke the old stores.
eraseInstFromFunction(SI);
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 902f41d426258..130b50d10909a 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3172,9 +3172,7 @@ Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
// Whatever aliasing information we had for the orignal load must also
// hold for the smaller load, so propagate the annotations.
- AAMDNodes Nodes;
- L->getAAMetadata(Nodes);
- NL->setAAMetadata(Nodes);
+ NL->setAAMetadata(L->getAAMetadata());
// Returning the load directly will cause the main loop to insert it in
// the wrong spot, so use replaceInstUsesWith().
return replaceInstUsesWith(EV, NL);
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 308b747f5a90d..7b9dad39719f1 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1230,8 +1230,7 @@ void GVN::eliminatePartiallyRedundantLoad(
}
// Transfer the old load's AA tags to the new load.
- AAMDNodes Tags;
- Load->getAAMetadata(Tags);
+ AAMDNodes Tags = Load->getAAMetadata();
if (Tags)
NewLoad->setAAMetadata(Tags);
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index 9dc3b03513462..688902ecb9ff6 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1363,8 +1363,7 @@ bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
// If all of the loads and stores that feed the value have the same AA tags,
// then we can propagate them onto any newly inserted loads.
- AAMDNodes AATags;
- LoadI->getAAMetadata(AATags);
+ AAMDNodes AATags = LoadI->getAAMetadata();
SmallPtrSet<BasicBlock*, 8> PredsScanned;
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 21e1cb6959ee1..158eb2742104a 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2153,9 +2153,9 @@ bool llvm::promoteLoopAccessesToScalars(
// Merge the AA tags.
if (LoopUses.empty()) {
// On the first load/store, just take its AA tags.
- UI->getAAMetadata(AATags);
+ AATags = UI->getAAMetadata();
} else if (AATags) {
- UI->getAAMetadata(AATags, /* Merge = */ true);
+ AATags = AATags.merge(UI->getAAMetadata());
}
LoopUses.push_back(UI);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 98e0f11085086..7f9268045167e 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1275,8 +1275,7 @@ static void speculatePHINodeLoads(PHINode &PN) {
// Get the AA tags and alignment to use from one of the loads. It does not
// matter which one we get and if any differ.
- AAMDNodes AATags;
- SomeLoad->getAAMetadata(AATags);
+ AAMDNodes AATags = SomeLoad->getAAMetadata();
Align Alignment = SomeLoad->getAlign();
// Rewrite all loads of the PN to use the new PHI.
@@ -1398,8 +1397,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
TL->setAlignment(LI->getAlign());
FL->setAlignment(LI->getAlign());
- AAMDNodes Tags;
- LI->getAAMetadata(Tags);
+ AAMDNodes Tags = LI->getAAMetadata();
if (Tags) {
TL->setAAMetadata(Tags);
FL->setAAMetadata(Tags);
@@ -2545,8 +2543,7 @@ class llvm::sroa::AllocaSliceRewriter
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
- AAMDNodes AATags;
- LI.getAAMetadata(AATags);
+ AAMDNodes AATags = LI.getAAMetadata();
unsigned AS = LI.getPointerAddressSpace();
@@ -2710,9 +2707,7 @@ class llvm::sroa::AllocaSliceRewriter
Value *OldOp = SI.getOperand(1);
assert(OldOp == OldPtr);
- AAMDNodes AATags;
- SI.getAAMetadata(AATags);
-
+ AAMDNodes AATags = SI.getAAMetadata();
Value *V = SI.getValueOperand();
// Strip all inbounds GEPs and pointer casts to try to dig out any root
@@ -2821,8 +2816,7 @@ class llvm::sroa::AllocaSliceRewriter
LLVM_DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getRawDest() == OldPtr);
- AAMDNodes AATags;
- II.getAAMetadata(AATags);
+ AAMDNodes AATags = II.getAAMetadata();
// If the memset has a variable size, it cannot be split, just adjust the
// pointer to the new alloca.
@@ -2950,8 +2944,7 @@ class llvm::sroa::AllocaSliceRewriter
LLVM_DEBUG(dbgs() << " original: " << II << "\n");
- AAMDNodes AATags;
- II.getAAMetadata(AATags);
+ AAMDNodes AATags = II.getAAMetadata();
bool IsDest = &II.getRawDestUse() == OldUse;
assert((IsDest && II.getRawDest() == OldPtr) ||
@@ -3458,9 +3451,7 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
// We have an aggregate being loaded, split it apart.
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
- AAMDNodes AATags;
- LI.getAAMetadata(AATags);
- LoadOpSplitter Splitter(&LI, *U, LI.getType(), AATags,
+ LoadOpSplitter Splitter(&LI, *U, LI.getType(), LI.getAAMetadata(),
getAdjustedAlignment(&LI, 0), DL);
Value *V = UndefValue::get(LI.getType());
Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
@@ -3511,9 +3502,7 @@ class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
// We have an aggregate being stored, split it apart.
LLVM_DEBUG(dbgs() << " original: " << SI << "\n");
- AAMDNodes AATags;
- SI.getAAMetadata(AATags);
- StoreOpSplitter Splitter(&SI, *U, V->getType(), AATags,
+ StoreOpSplitter Splitter(&SI, *U, V->getType(), SI.getAAMetadata(),
getAdjustedAlignment(&SI, 0), DL);
Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
Visited.erase(&SI);
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 31ef306f8b884..0865e4a41bfd2 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -3486,10 +3486,7 @@ static bool mergeConditionalStoreToAddress(
/*BranchWeights=*/nullptr, DTU);
QB.SetInsertPoint(T);
StoreInst *SI = cast<StoreInst>(QB.CreateStore(QPHI, Address));
- AAMDNodes AAMD;
- PStore->getAAMetadata(AAMD, /*Merge=*/false);
- PStore->getAAMetadata(AAMD, /*Merge=*/true);
- SI->setAAMetadata(AAMD);
+ SI->setAAMetadata(PStore->getAAMetadata().merge(QStore->getAAMetadata()));
// Choose the minimum alignment. If we could prove both stores execute, we
// could use biggest one. In this case, though, we only know that one of the
// stores executes. And we don't know it's safe to take the alignment from a
diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
index 35a69b15231e5..a4a52248535aa 100644
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -1147,8 +1147,7 @@ bool ScopDetection::isValidAccess(Instruction *Inst, const SCEV *AF,
// Check if the base pointer of the memory access does alias with
// any other pointer. This cannot be handled at the moment.
- AAMDNodes AATags;
- Inst->getAAMetadata(AATags);
+ AAMDNodes AATags = Inst->getAAMetadata();
AliasSet &AS = Context.AST.getAliasSetFor(
MemoryLocation::getBeforeOrAfter(BP->getValue(), AATags));