[llvm] [IRBuilder] Add Align argument for CreateMaskedExpandLoad and CreateMaskedCompressStore (PR #122878)
Sergey Kachkov via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 14 01:56:36 PST 2025
https://github.com/skachkov-sc created https://github.com/llvm/llvm-project/pull/122878
This patch adds the ability to specify an alignment for the llvm.masked.expandload/llvm.masked.compressstore intrinsics in IRBuilder (this is mostly NFC for now, since these builder methods are only used in MemorySanitizer, but the intention is to generate these intrinsics in compiler passes as well, e.g. in LoopVectorizer)
From bae4cff1240f073badc82d86a3fdf82e8bd2bfa9 Mon Sep 17 00:00:00 2001
From: Sergey Kachkov <sergey.kachkov at syntacore.com>
Date: Fri, 10 Jan 2025 15:38:59 +0300
Subject: [PATCH] [IRBuilder] Add Align argument for CreateMaskedExpandLoad and
CreateMaskedCompressStore
---
llvm/include/llvm/IR/IRBuilder.h | 5 +++--
llvm/lib/IR/IRBuilder.cpp | 20 ++++++++++++++-----
.../Instrumentation/MemorySanitizer.cpp | 9 ++++++---
3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 0332a6cc2e76ea..833c91fd974619 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -854,12 +854,13 @@ class IRBuilderBase {
Value *Mask = nullptr);
/// Create a call to Masked Expand Load intrinsic
- CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr,
+ CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
+ Value *Mask = nullptr,
Value *PassThru = nullptr,
const Twine &Name = "");
/// Create a call to Masked Compress Store intrinsic
- CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr,
+ CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align,
Value *Mask = nullptr);
/// Return an all true boolean vector (mask) with \p NumElts lanes.
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 27b499e42a4e4c..15f34359cfbf50 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -644,13 +644,15 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
/// Create a call to Masked Expand Load intrinsic
/// \p Ty - vector type to load
/// \p Ptr - base pointer for the load
+/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
/// of the result
/// \p Name - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
- Value *Mask, Value *PassThru,
+ MaybeAlign Align, Value *Mask,
+ Value *PassThru,
const Twine &Name) {
assert(Ty->isVectorTy() && "Type should be vector");
assert(Mask && "Mask should not be all-ones (null)");
@@ -658,24 +660,32 @@ CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
PassThru = PoisonValue::get(Ty);
Type *OverloadedTypes[] = {Ty};
Value *Ops[] = {Ptr, Mask, PassThru};
- return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
- OverloadedTypes, Name);
+ CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
+ OverloadedTypes, Name);
+ if (Align)
+ CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *Align));
+ return CI;
}
/// Create a call to Masked Compress Store intrinsic
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
+/// \p Align - alignment for one element
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
+ MaybeAlign Align,
Value *Mask) {
Type *DataTy = Val->getType();
assert(DataTy->isVectorTy() && "Val should be a vector");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = {DataTy};
Value *Ops[] = {Val, Ptr, Mask};
- return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
- OverloadedTypes);
+ CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
+ OverloadedTypes);
+ if (Align)
+ CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), *Align));
+ return CI;
}
template <typename T0>
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 429e323b6b7c24..e82dfa7b75cb72 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3542,6 +3542,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleMaskedExpandLoad(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Ptr = I.getArgOperand(0);
+ MaybeAlign Align = I.getParamAlign(0);
Value *Mask = I.getArgOperand(1);
Value *PassThru = I.getArgOperand(2);
@@ -3561,8 +3562,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
auto [ShadowPtr, OriginPtr] =
getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ false);
- Value *Shadow = IRB.CreateMaskedExpandLoad(
- ShadowTy, ShadowPtr, Mask, getShadow(PassThru), "_msmaskedexpload");
+ Value *Shadow =
+ IRB.CreateMaskedExpandLoad(ShadowTy, ShadowPtr, Align, Mask,
+ getShadow(PassThru), "_msmaskedexpload");
setShadow(&I, Shadow);
@@ -3574,6 +3576,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *Values = I.getArgOperand(0);
Value *Ptr = I.getArgOperand(1);
+ MaybeAlign Align = I.getParamAlign(1);
Value *Mask = I.getArgOperand(2);
if (ClCheckAccessAddress) {
@@ -3587,7 +3590,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
auto [ShadowPtr, OriginPtrs] =
getShadowOriginPtr(Ptr, IRB, ElementShadowTy, {}, /*isStore*/ true);
- IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Mask);
+ IRB.CreateMaskedCompressStore(Shadow, ShadowPtr, Align, Mask);
// TODO: Store origins.
}
More information about the llvm-commits
mailing list