[clang] 0957233 - [Alignment][NFC] Use Align with CreateMaskedStore
Guillaume Chatelet via cfe-commits
cfe-commits at lists.llvm.org
Wed Jan 22 02:05:13 PST 2020
Author: Guillaume Chatelet
Date: 2020-01-22T11:04:39+01:00
New Revision: 0957233320eb0096bbb7665e0762a13bad1e7cb8
URL: https://github.com/llvm/llvm-project/commit/0957233320eb0096bbb7665e0762a13bad1e7cb8
DIFF: https://github.com/llvm/llvm-project/commit/0957233320eb0096bbb7665e0762a13bad1e7cb8.diff
LOG: [Alignment][NFC] Use Align with CreateMaskedStore
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
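The change itself is mechanical at each call site: the raw unsigned byte count becomes an llvm::Align, with Align::None() spelling out the one-byte (unaligned) assumption. A minimal sketch of the new call shape, assuming an existing IRBuilder<> B and suitably typed Val, Ptr and Mask values:

  // Old (now deprecated): alignment as a raw unsigned byte count.
  //   B.CreateMaskedStore(Val, Ptr, 16, Mask);
  // New: alignment carried by the llvm::Align value type.
  B.CreateMaskedStore(Val, Ptr, llvm::Align(16), Mask);
  // An explicitly unaligned store assumes one-byte alignment.
  B.CreateMaskedStore(Val, Ptr, llvm::Align::None(), Mask);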
Reviewers: courbet
Subscribers: hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73106
Added:
Modified:
clang/lib/CodeGen/CGBuiltin.cpp
llvm/include/llvm/IR/Constants.h
llvm/include/llvm/IR/IRBuilder.h
llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
llvm/lib/IR/AutoUpgrade.cpp
llvm/lib/IR/IRBuilder.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 29eebbb403ea..86a3f1e0d237 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9714,9 +9714,8 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
return MaskVec;
}
-static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops,
- unsigned Align) {
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9724,7 +9723,7 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
@@ -10592,12 +10591,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
- return EmitX86MaskedStore(*this, Ops, 1);
+ return EmitX86MaskedStore(*this, Ops, Align::None());
case X86::BI__builtin_ia32_storess128_mask:
- case X86::BI__builtin_ia32_storesd128_mask: {
- return EmitX86MaskedStore(*this, Ops, 1);
- }
+ case X86::BI__builtin_ia32_storesd128_mask:
+ return EmitX86MaskedStore(*this, Ops, Align::None());
+
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
@@ -10708,11 +10707,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
- case X86::BI__builtin_ia32_storeapd512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedStore(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_storeapd512_mask:
+ return EmitX86MaskedStore(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
+
case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
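On the clang side, the unaligned store builtins now pass Align::None() rather than the literal 1, and the naturally aligned variants convert the AST-level CharUnits alignment with getAsAlign(). A condensed sketch of the pattern in the hunk above, assuming a CodeGenFunction &CGF and the builtin call expression E:

  // CharUnits -> llvm::Align bridge used by the aligned store builtins.
  clang::CharUnits AlignChars =
      CGF.getContext().getTypeAlignInChars(E->getArg(1)->getType());
  llvm::Align Alignment = AlignChars.getAsAlign();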
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index 262ab439df65..9b3c1e723a10 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -157,6 +157,10 @@ class ConstantInt final : public ConstantData {
return Val.getSExtValue();
}
+ /// Return the constant as an llvm::Align. Note that this method can assert if
+ /// the value does not fit in 64 bits or is not a power of two.
+ inline Align getAlignValue() const { return Align(getZExtValue()); }
+
/// A helper method that can be used to determine if the constant contained
/// within is equal to a constant. This only works for very small values,
/// because this is all that can be represented with all types.
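The new accessor turns reading an alignment operand out of the IR into a one-liner. A minimal sketch, assuming CI is a CallInst * whose third operand is the constant alignment of a masked store:

  // getAlignValue() wraps getZExtValue() in an Align; per the comment
  // above, it asserts if the value does not fit in 64 bits or is not a
  // power of two.
  llvm::Align Alignment =
      llvm::cast<llvm::ConstantInt>(CI->getArgOperand(2))->getAlignValue();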
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index b02945f98101..4d242ae64067 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -752,13 +752,21 @@ class IRBuilderBase {
Value *PassThru = nullptr,
const Twine &Name = ""),
"Use the version that takes Align instead") {
- return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+ return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
+ Name);
}
CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");
/// Create a call to Masked Store intrinsic
- CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
+ LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
+ unsigned Alignment,
+ Value *Mask),
+ "Use the version that takes Align instead") {
+ return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
+ }
+
+ CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
Value *Mask);
/// Create a call to Masked Gather intrinsic
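The unsigned overload stays behind LLVM_ATTRIBUTE_DEPRECATED and forwards through assumeAligned(), which, unlike the Align constructor, treats 0 as "alignment unknown" and yields a one-byte Align instead of asserting. Conceptually (a sketch, not the verbatim header):

  inline llvm::Align assumeAligned(uint64_t Value) {
    return Value ? llvm::Align(Value) : llvm::Align::None(); // 0 -> 1 byte
  }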
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
index ee72de67d875..e7ef6c769970 100644
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -849,39 +849,41 @@ bool ScalarizeMaskedMemIntrin::optimizeCallInst(CallInst *CI,
bool &ModifiedDT) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II) {
- unsigned Alignment;
switch (II->getIntrinsicID()) {
default:
break;
- case Intrinsic::masked_load: {
+ case Intrinsic::masked_load:
// Scalarize unsupported vector masked load
- Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
- if (TTI->isLegalMaskedLoad(CI->getType(), MaybeAlign(Alignment)))
+ if (TTI->isLegalMaskedLoad(
+ CI->getType(),
+ cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue()))
return false;
scalarizeMaskedLoad(CI, ModifiedDT);
return true;
- }
- case Intrinsic::masked_store: {
- Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
- if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(),
- MaybeAlign(Alignment)))
+ case Intrinsic::masked_store:
+ if (TTI->isLegalMaskedStore(
+ CI->getArgOperand(0)->getType(),
+ cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue()))
return false;
scalarizeMaskedStore(CI, ModifiedDT);
return true;
- }
- case Intrinsic::masked_gather:
- Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ case Intrinsic::masked_gather: {
+ unsigned Alignment =
+ cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
if (TTI->isLegalMaskedGather(CI->getType(), MaybeAlign(Alignment)))
return false;
scalarizeMaskedGather(CI, ModifiedDT);
return true;
- case Intrinsic::masked_scatter:
- Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ }
+ case Intrinsic::masked_scatter: {
+ unsigned Alignment =
+ cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
if (TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType(),
MaybeAlign(Alignment)))
return false;
scalarizeMaskedScatter(CI, ModifiedDT);
return true;
+ }
case Intrinsic::masked_expandload:
if (TTI->isLegalMaskedExpandLoad(CI->getType()))
return false;
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index f587414b3294..5a598517cfe3 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1237,18 +1237,19 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(Data->getType()));
- unsigned Align =
- Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
+ const Align Alignment =
+ Aligned ? Align(cast<VectorType>(Data->getType())->getBitWidth() / 8)
+ : Align::None();
// If the mask is all ones just emit a regular store.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
- return Builder.CreateAlignedStore(Data, Ptr, Align);
+ return Builder.CreateAlignedStore(Data, Ptr, Alignment);
// Convert the mask from an integer type to a vector of i1.
unsigned NumElts = Data->getType()->getVectorNumElements();
Mask = getX86MaskVec(Builder, Mask, NumElts);
- return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
+ return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
}
static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
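For the auto-upgraded x86 intrinsics, the natural alignment of an aligned variant is the full vector width in bytes (e.g. 64 for a 512-bit vector), while the unaligned variants fall back to Align::None(). The computation from the hunk above, assuming Data is a Value * of vector type:

  const llvm::Align Alignment =
      Aligned ? llvm::Align(llvm::cast<llvm::VectorType>(
                                Data->getType())->getBitWidth() / 8)
              : llvm::Align::None();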
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 6e365a4f8345..b86ee4964b7e 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -487,19 +487,19 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
}
/// Create a call to a Masked Store intrinsic.
-/// \p Val - data to be stored,
-/// \p Ptr - base pointer for the store
-/// \p Align - alignment of the destination location
-/// \p Mask - vector of booleans which indicates what vector lanes should
-/// be accessed in memory
+/// \p Val - data to be stored,
+/// \p Ptr - base pointer for the store
+/// \p Alignment - alignment of the destination location
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
- unsigned Align, Value *Mask) {
+ Align Alignment, Value *Mask) {
auto *PtrTy = cast<PointerType>(Ptr->getType());
Type *DataTy = PtrTy->getElementType();
assert(DataTy->isVectorTy() && "Ptr should point to a vector");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = { DataTy, PtrTy };
- Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
+ Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
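This hunk also shows why the patch is NFC: at the IR level the intrinsic still encodes the alignment as an i32 immediate operand, so the builder lowers the Align back with Alignment.value(). For a <4 x float> store, for example, the emitted call still looks something like:

  // B.CreateMaskedStore(Val, Ptr, llvm::Align(16), Mask) emits:
  //   call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val,
  //       <4 x float>* %ptr, i32 16, <4 x i1> %mask)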
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index a7aac58c795e..787496b8f847 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1372,7 +1372,7 @@ static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
// on each element's most significant bit (the sign bit).
Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
- IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
+ IC.Builder.CreateMaskedStore(Vec, PtrCast, Align::None(), BoolMask);
// 'Replace uses' doesn't work for stores. Erase the original masked store.
IC.eraseInstFromFunction(II);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 45b8f83d3877..413db1a256f8 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2904,7 +2904,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *V = I.getArgOperand(0);
Value *Addr = I.getArgOperand(1);
- const MaybeAlign Alignment(
+ const Align Alignment(
cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
Value *Mask = I.getArgOperand(3);
Value *Shadow = getShadow(V);
@@ -2921,21 +2921,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Mask, &I);
}
- IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment ? Alignment->value() : 0,
- Mask);
+ IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);
if (MS.TrackOrigins) {
auto &DL = F.getParent()->getDataLayout();
paintOrigin(IRB, getOrigin(V), OriginPtr,
DL.getTypeStoreSize(Shadow->getType()),
- llvm::max(Alignment, kMinOriginAlignment));
+ std::max(Alignment, kMinOriginAlignment));
}
}
bool handleMaskedLoad(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
- const MaybeAlign Alignment(
+ const Align Alignment(
cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
Value *Mask = I.getArgOperand(2);
Value *PassThru = I.getArgOperand(3);
@@ -2945,7 +2944,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
- setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
+ setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
getShadow(PassThru), "_msmaskedld"));
} else {
setShadow(&I, getCleanShadow(&I));
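Because Align is a plain value type with a total order, std::max applies directly where the old MaybeAlign code needed the llvm::max helper. A small sketch, assuming kMinOriginAlignment is the 4-byte constant used by MemorySanitizer:

  const llvm::Align kMinOriginAlignment = llvm::Align(4); // assumed value
  llvm::Align Alignment(16);
  llvm::Align Chosen = std::max(Alignment, kMinOriginAlignment); // 16 bytes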
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 0f25ff8bb46a..6d2ebe83f921 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2343,7 +2343,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
Value *ShuffledMask = Builder.CreateShuffleVector(
BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
NewStoreInstr = Builder.CreateMaskedStore(
- IVec, AddrParts[Part], Group->getAlignment(), ShuffledMask);
+ IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
}
else
NewStoreInstr = Builder.CreateAlignedStore(IVec, AddrParts[Part],
@@ -2449,8 +2449,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
}
auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
if (isMaskRequired)
- NewSI = Builder.CreateMaskedStore(
- StoredVal, VecPtr, Alignment.value(), BlockInMaskParts[Part]);
+ NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
+ BlockInMaskParts[Part]);
else
NewSI =
Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment.value());