[llvm] d0a7cc7 - [Alignment][NFC] Use Align with CreateMaskedScatter/Gather
Guillaume Chatelet via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 27 01:25:03 PST 2020
Author: Guillaume Chatelet
Date: 2020-01-27T10:17:14+01:00
New Revision: d0a7cc717734ca85e9ad652671d8dfa2456243a7
URL: https://github.com/llvm/llvm-project/commit/d0a7cc717734ca85e9ad652671d8dfa2456243a7
DIFF: https://github.com/llvm/llvm-project/commit/d0a7cc717734ca85e9ad652671d8dfa2456243a7.diff
LOG: [Alignment][NFC] Use Align with CreateMaskedScatter/Gather
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
This patch shows that CreateMaskedScatter/CreateMaskedGather can only take positive, non-zero alignment values.
Reviewers: courbet
Subscribers: hiraditya, llvm-commits, delena
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D73361
Added:
Modified:
llvm/include/llvm/IR/IRBuilder.h
llvm/lib/IR/IRBuilder.cpp
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 0bcb3eba342f..7d75cd68bea0 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -770,13 +770,30 @@ class IRBuilderBase {
Value *Mask);
/// Create a call to Masked Gather intrinsic
- CallInst *CreateMaskedGather(Value *Ptrs, unsigned Align,
- Value *Mask = nullptr,
- Value *PassThru = nullptr,
- const Twine& Name = "");
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedGather(Value *Ptrs, unsigned Alignment,
+ Value *Mask = nullptr,
+ Value *PassThru = nullptr,
+ const Twine &Name = ""),
+ "Use the version that takes Align instead") {
+ return CreateMaskedGather(Ptrs, Align(Alignment), Mask, PassThru, Name);
+ }
+
+ /// Create a call to Masked Gather intrinsic
+ CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
+ Value *Mask = nullptr, Value *PassThru = nullptr,
+ const Twine &Name = "");
+
+ /// Create a call to Masked Scatter intrinsic
+ LLVM_ATTRIBUTE_DEPRECATED(
+ CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Alignment,
+ Value *Mask = nullptr),
+ "Use the version that takes Align instead") {
+ return CreateMaskedScatter(Val, Ptrs, Align(Alignment), Mask);
+ }
/// Create a call to Masked Scatter intrinsic
- CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, unsigned Align,
+ CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
Value *Mask = nullptr);
/// Create an assume intrinsic call that allows the optimizer to
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 5d6f839cd1a0..f59d68d1db00 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -523,9 +523,9 @@ CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
- Value *Mask, Value *PassThru,
- const Twine& Name) {
+CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
+ Value *Mask, Value *PassThru,
+ const Twine &Name) {
auto PtrsTy = cast<VectorType>(Ptrs->getType());
auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
unsigned NumElts = PtrsTy->getVectorNumElements();
@@ -539,7 +539,7 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
PassThru = UndefValue::get(DataTy);
Type *OverloadedTypes[] = {DataTy, PtrsTy};
- Value * Ops[] = {Ptrs, getInt32(Align), Mask, PassThru};
+ Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
@@ -555,7 +555,7 @@ CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
- unsigned Align, Value *Mask) {
+ Align Alignment, Value *Mask) {
auto PtrsTy = cast<VectorType>(Ptrs->getType());
auto DataTy = cast<VectorType>(Data->getType());
unsigned NumElts = PtrsTy->getVectorNumElements();
@@ -572,7 +572,7 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
NumElts));
Type *OverloadedTypes[] = {DataTy, PtrsTy};
- Value * Ops[] = {Data, Ptrs, getInt32(Align), Mask};
+ Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2c9b9bdb4c08..e3274de0a801 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2437,8 +2437,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
if (CreateGatherScatter) {
Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
Value *VectorGep = State.get(Addr, Part);
- NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep,
- Alignment.value(), MaskPart);
+ NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
+ MaskPart);
} else {
if (Reverse) {
// If we store to reverse consecutive memory locations, then we need
@@ -2467,7 +2467,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
if (CreateGatherScatter) {
Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
Value *VectorGep = State.get(Addr, Part);
- NewLI = Builder.CreateMaskedGather(VectorGep, Alignment.value(), MaskPart,
+ NewLI = Builder.CreateMaskedGather(VectorGep, Alignment, MaskPart,
nullptr, "wide.masked.gather");
addMetadata(NewLI, LI);
} else {
More information about the llvm-commits
mailing list