[llvm] 72b7761 - [IRBuilder] Add CreateMaskedExpandLoad and CreateMaskedCompressStore
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 14 19:20:32 PDT 2022
Author: Vitaly Buka
Date: 2022-09-14T19:18:52-07:00
New Revision: 72b776168c7c80d2035c7226488462dcffc97e75
URL: https://github.com/llvm/llvm-project/commit/72b776168c7c80d2035c7226488462dcffc97e75
DIFF: https://github.com/llvm/llvm-project/commit/72b776168c7c80d2035c7226488462dcffc97e75.diff
LOG: [IRBuilder] Add CreateMaskedExpandLoad and CreateMaskedCompressStore
Added:
Modified:
llvm/include/llvm/IR/IRBuilder.h
llvm/lib/IR/IRBuilder.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 8cf76d2de3fda..a718dc487386f 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -781,6 +781,15 @@ class IRBuilderBase {
CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
Value *Mask = nullptr);
+ /// Create a call to Masked Expand Load intrinsic
+ CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr,
+ Value *PassThru = nullptr,
+ const Twine &Name = "");
+
+ /// Create a call to Masked Compress Store intrinsic
+ CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr,
+ Value *Mask = nullptr);
+
/// Create an assume intrinsic call that allows the optimizer to
/// assume that the provided condition will be true.
///
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 47967b12179dc..8a46a71186c89 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -700,6 +700,51 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
+/// Create a call to Masked Expand Load intrinsic
+/// \p Ty - vector type to load
+/// \p Ptr - base pointer for the load
+/// \p Mask - vector of booleans which indicates what vector lanes should
be accessed in memory
+/// \p PassThru - pass-through value that is used to fill the masked-off lanes
of the result
+/// \p Name - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
+ Value *Mask, Value *PassThru,
+ const Twine &Name) {
+ auto *PtrTy = cast<PointerType>(Ptr->getType());
+ assert(Ty->isVectorTy() && "Type should be vector");
+ // With typed pointers the pointee must equal the vector element type;
+ // opaque pointers trivially satisfy this check.
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(
+ cast<FixedVectorType>(Ty)->getElementType()) &&
+ "Wrong element type");
+ // Unlike some other masked builders, an explicit mask is mandatory here;
+ // a null mask (meaning "all lanes") is rejected.
+ assert(Mask && "Mask should not be all-ones (null)");
+ // Masked-off result lanes default to undef when no pass-through is given.
+ if (!PassThru)
+ PassThru = UndefValue::get(Ty);
+ Type *OverloadedTypes[] = {Ty};
+ // Operand order matches llvm.masked.expandload: (ptr, mask, passthru).
+ Value *Ops[] = {Ptr, Mask, PassThru};
+ return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
+ OverloadedTypes, Name);
+}
+
+/// Create a call to Masked Compress Store intrinsic
+/// \p Val - data to be stored,
+/// \p Ptr - base pointer for the store
+/// \p Mask - vector of booleans which indicates what vector lanes should
be accessed in memory
+CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
+ Value *Mask) {
+ auto *PtrTy = cast<PointerType>(Ptr->getType());
+ Type *DataTy = Val->getType();
+ assert(DataTy->isVectorTy() && "Val should be a vector");
+ // With typed pointers the pointee must equal the vector element type;
+ // opaque pointers trivially satisfy this check.
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(
+ cast<FixedVectorType>(DataTy)->getElementType()) &&
+ "Wrong element type");
+ // An explicit mask is mandatory; a null mask ("all lanes") is rejected.
+ assert(Mask && "Mask should not be all-ones (null)");
+ Type *OverloadedTypes[] = {DataTy};
+ // Operand order matches llvm.masked.compressstore: (value, ptr, mask).
+ Value *Ops[] = {Val, Ptr, Mask};
+ return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
+ OverloadedTypes);
+}
+
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
More information about the llvm-commits
mailing list