[llvm] bc8a1ab - [Alignment][NFC] Use Align with CreateMaskedLoad
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 21 05:13:42 PST 2020
Author: Guillaume Chatelet
Date: 2020-01-21T14:13:22+01:00
New Revision: bc8a1ab26fba5d5635467b9d0fd7ad9a0fd5bc6e
URL: https://github.com/llvm/llvm-project/commit/bc8a1ab26fba5d5635467b9d0fd7ad9a0fd5bc6e
DIFF: https://github.com/llvm/llvm-project/commit/bc8a1ab26fba5d5635467b9d0fd7ad9a0fd5bc6e.diff
LOG: [Alignment][NFC] Use Align with CreateMaskedLoad
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
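For context, the Align type (llvm/Support/Alignment.h, introduced in D64790) wraps a
non-zero, power-of-two byte count, so a zero or non-power-of-two alignment can no
longer slip through as a plain unsigned. A minimal sketch, hedging on the exact API
at this revision:

    #include "llvm/Support/Alignment.h"
    #include <cstdint>
    using llvm::Align;

    void alignSketch() {
      Align A(16);                // constructor asserts 16 is a power of two
      uint64_t Bytes = A.value(); // back to a plain integer: 16
      Align One = Align::None();  // 1-byte alignment, i.e. no requirement
      (void)Bytes; (void)One;
    }

This is why the callers below trade plain integers for Align and Align::None().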
Reviewers: courbet
Subscribers: hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73087
Added:
Modified:
clang/lib/CodeGen/CGBuiltin.cpp
llvm/include/llvm/Analysis/VectorUtils.h
llvm/include/llvm/IR/IRBuilder.h
llvm/lib/IR/AutoUpgrade.cpp
llvm/lib/IR/IRBuilder.cpp
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 9986ea4cb94c..8d00d3d64f5c 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9727,8 +9727,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}
-static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, unsigned Align) {
+static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9736,7 +9736,7 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+ return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
@@ -10731,11 +10731,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align::None());
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
- return EmitX86MaskedLoad(*this, Ops, 1);
+ return EmitX86MaskedLoad(*this, Ops, Align::None());
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10748,11 +10748,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
- case X86::BI__builtin_ia32_movdqa64load512_mask: {
- unsigned Align =
- getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
- return EmitX86MaskedLoad(*this, Ops, Align);
- }
+ case X86::BI__builtin_ia32_movdqa64load512_mask:
+ return EmitX86MaskedLoad(
+ *this, Ops,
+ getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index dd42b4f5be93..8b465ca2983d 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -509,6 +509,7 @@ template <typename InstTy> class InterleaveGroup {
bool isReverse() const { return Reverse; }
uint32_t getFactor() const { return Factor; }
uint32_t getAlignment() const { return Alignment.value(); }
+ Align getAlign() const { return Alignment; }
uint32_t getNumMembers() const { return Members.size(); }
/// Try to insert a new member \p Instr with index \p Index and
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 6f6d6db31726..9341810c7b3b 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -727,7 +727,14 @@ class IRBuilderBase {
CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
/// Create a call to Masked Load intrinsic
- CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
+ Value *PassThru = nullptr,
+ const Twine &Name = ""),
+ "Use the version that takes Align instead") {
+ return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+ }
+ CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");
/// Create a call to Masked Store intrinsic
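The IRBuilder.h hunk above keeps the old unsigned overload as a forwarding shim behind
LLVM_ATTRIBUTE_DEPRECATED, so out-of-tree callers keep compiling and get a migration
warning instead of a hard break. A hypothetical caller-side migration (the function
name here is illustrative, not from the patch):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    Value *emitMaskedLoad(IRBuilder<> &Builder, Value *Ptr, Value *Mask,
                          Value *PassThru) {
      // Deprecated as of this patch (still compiles, now warns):
      //   return Builder.CreateMaskedLoad(Ptr, 16, Mask, PassThru);
      return Builder.CreateMaskedLoad(Ptr, Align(16), Mask, PassThru);
    }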
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 6e2beeb839b6..f587414b3294 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1257,18 +1257,19 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
Type *ValTy = Passthru->getType();
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
- unsigned Align =
- Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
+ const Align Alignment =
+ Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
+ : Align::None();
// If the mask is all ones just emit a regular store.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
- return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
+ return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
// Convert the mask from an integer type to a vector of i1.
unsigned NumElts = Passthru->getType()->getVectorNumElements();
Mask = getX86MaskVec(Builder, Mask, NumElts);
- return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
+ return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
}
static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 32daf40f390b..2a6b2516d653 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -466,14 +466,14 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
}
/// Create a call to a Masked Load intrinsic.
-/// \p Ptr - base pointer for the load
-/// \p Align - alignment of the source location
-/// \p Mask - vector of booleans which indicates what vector lanes should
-/// be accessed in memory
-/// \p PassThru - pass-through value that is used to fill the masked-off lanes
-/// of the result
-/// \p Name - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
+/// \p Ptr - base pointer for the load
+/// \p Alignment - alignment of the source location
+/// \p Mask - vector of booleans which indicates what vector lanes should
+/// be accessed in memory
+/// \p PassThru - pass-through value that is used to fill the masked-off lanes
+/// of the result
+/// \p Name - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
Value *Mask, Value *PassThru,
const Twine &Name) {
auto *PtrTy = cast<PointerType>(Ptr->getType());
@@ -483,7 +483,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
if (!PassThru)
PassThru = UndefValue::get(DataTy);
Type *OverloadedTypes[] = { DataTy, PtrTy };
- Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
+ Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
OverloadedTypes, Name);
}
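Worth noting for the NFC claim: the llvm.masked.load intrinsic still encodes its
alignment as an i32 immediate operand, so inside CreateMaskedLoad the strongly typed
Align is converted straight back and the emitted IR is unchanged:

    // Only the C++-level signature changed; the intrinsic operand is still
    // an i32, so Alignment.value() recovers exactly the integer used before.
    Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};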
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 74ee88f404c9..a7aac58c795e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1331,7 +1331,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// The pass-through vector for an x86 masked load is a zero vector.
CallInst *NewMaskedLoad =
- IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
+ IC.Builder.CreateMaskedLoad(PtrCast, Align::None(), BoolMask, ZeroVec);
return IC.replaceInstUsesWith(II, NewMaskedLoad);
}
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 80acab307578..45b8f83d3877 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2945,9 +2945,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
- setShadow(&I, IRB.CreateMaskedLoad(
- ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
- getShadow(PassThru), "_msmaskedld"));
+ setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
+ getShadow(PassThru), "_msmaskedld"));
} else {
setShadow(&I, getCleanShadow(&I));
}
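In the MemorySanitizer hunk, Alignment is a MaybeAlign (an Optional<Align>), and the
rewrite replaces the "Alignment ? Alignment->value() : 0" dance with a plain
dereference, which assumes the value is set at this point. A minimal sketch of the
idiom, assuming the Alignment.h API of the time:

    #include "llvm/Support/Alignment.h"
    using llvm::Align;
    using llvm::MaybeAlign;

    void maybeAlignSketch() {
      MaybeAlign MA;  // empty: alignment unknown
      MA = Align(8);  // engaged: holds an 8-byte alignment
      Align A = *MA;  // valid only while MA is engaged, as the code
      (void)A;        // above now assumes
    }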
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b1650713d546..0f25ff8bb46a 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2263,7 +2263,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
: ShuffledMask;
}
NewLoad =
- Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlignment(),
+ Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
GroupMask, UndefVec, "wide.masked.vec");
}
else
@@ -2475,8 +2475,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
if (isMaskRequired)
NewLI = Builder.CreateMaskedLoad(
- VecPtr, Alignment.value(), BlockInMaskParts[Part],
- UndefValue::get(DataTy), "wide.masked.load");
+ VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
+ "wide.masked.load");
else
NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
"wide.load");