[llvm] [IR] Add MemSetPatternInst to the MemIntrinsic hierarchy (PR #153301)
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 15 07:59:17 PDT 2025
https://github.com/preames updated https://github.com/llvm/llvm-project/pull/153301
>From 1d8a27e79d23eb44fa2d75277bc226f789f24e83 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Mon, 11 Aug 2025 14:04:56 -0700
Subject: [PATCH 1/2] [IR] Add MemSetPatternInst to the MemIntrinsic hierarchy
Previously, memset.pattern was not considered a memory intrinsic
in terms of the IntrinsicInst class structure. This change doesn't
merge MemSetInst and MemSetPatternInst. That may come later, but for
now MemSetPatternInst is a parallel subclass to MemSetInst, and only
the common MemIntrinsic base class covers both.
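To make the effect concrete, here is a minimal sketch (mine, not code
from this patch; visitMemOp is a hypothetical helper) of the dispatch
the new hierarchy enables:

  #include "llvm/IR/IntrinsicInst.h"

  using namespace llvm;

  // Hypothetical helper: one dyn_cast now covers memset, memset.inline,
  // memcpy(.inline), memmove, and memset.pattern.
  static void visitMemOp(Instruction &I) {
    if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      Value *Dest = MI->getRawDest(); // shared MemIntrinsicBase accessor
      Value *Len = MI->getLength();   // shared MemIntrinsicBase accessor
      bool Vol = MI->isVolatile();    // shared MemIntrinsic accessor
      (void)Dest; (void)Vol;
      if (isa<MemSetPatternInst>(MI)) {
        // Caution: memset.pattern's length operand counts pattern
        // repetitions, not bytes, so byte-size reasoning must bail out.
        (void)Len;
      }
    }
  }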
When I started considering patches to extend memset optimizations
(e.g. in instcombine) to cover memset.pattern, I realized I was going
to have to duplicate a whole bunch of code if we didn't merge it into
the class structure.
Unfortunately, this is a somewhat high-risk patch. There are *a lot*
of places in the code base which abstract over the MemIntrinsic class.
I've audited the obvious usages, and added bailouts where the code
didn't seem like it naturally did the right thing for memset.pattern
(or I didn't know enough about the code to tell), but I might have
missed something.
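For reviewers skimming the diff: the bailouts all have one recurring
shape, shown here in isolation (illustrative only; the wrapper function
is hypothetical):

  #include "llvm/IR/IntrinsicInst.h"

  using namespace llvm;

  // Illustrative only: the recurring bailout shape added in this patch.
  static bool handlesMemIntrinsic(const Instruction *I) {
    if (const auto *MI = dyn_cast<MemIntrinsic>(I);
        MI && !isa<MemSetPatternInst>(MI)) {
      // ... pre-existing handling, still limited to the byte-length
      // intrinsics (memset/memcpy/memmove) until each site is audited ...
      return true;
    }
    return false;
  }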
This patch is definitely *not* NFC. I've included a couple of test
cases for easy-to-exercise changes (where the old code already did the
right thing for the new intrinsic), but I don't claim this to be
exhaustive. If any reviewer has a specific test they'd like to see
added, let me know. I'm also open to suggestions on how to restructure
this in a lower-risk manner; I tried a couple of options and basically
decided I was going to have to just bite the bullet here.
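One immediate payoff is visible in the IntrinsicInst.h hunk below:
MemSetPatternInst had re-implemented the volatile accessors, and after
this change it simply inherits them from MemIntrinsic. A small sketch
(clearVolatileFlag is a hypothetical helper, not part of the patch):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/IntrinsicInst.h"

  using namespace llvm;

  static void clearVolatileFlag(MemSetPatternInst *MSP) {
    if (MSP->isVolatile()) // inherited from MemIntrinsic, no duplication
      MSP->setVolatile(ConstantInt::getFalse(MSP->getContext()));
  }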
---
llvm/include/llvm/IR/IntrinsicInst.h | 14 ++------------
llvm/lib/Analysis/StackSafetyAnalysis.cpp | 3 ++-
llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp | 3 ++-
llvm/lib/CodeGen/SafeStack.cpp | 3 ++-
llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 2 +-
.../Instrumentation/AddressSanitizer.cpp | 3 ++-
.../Instrumentation/HWAddressSanitizer.cpp | 3 ++-
.../Instrumentation/MemProfInstrumentation.cpp | 3 ++-
.../NumericalStabilitySanitizer.cpp | 3 ++-
.../Instrumentation/PGOMemOPSizeOpt.cpp | 3 +++
.../Instrumentation/ThreadSanitizer.cpp | 2 +-
.../Transforms/Instrumentation/TypeSanitizer.cpp | 6 ++++--
.../Instrumentation/ValueProfilePlugins.inc | 3 +++
.../Transforms/Scalar/DeadStoreElimination.cpp | 4 +++-
llvm/lib/Transforms/Scalar/GVN.cpp | 4 ++--
.../lib/Transforms/Scalar/InferAddressSpaces.cpp | 3 ++-
llvm/lib/Transforms/Scalar/NewGVN.cpp | 3 ++-
llvm/lib/Transforms/Scalar/SROA.cpp | 6 ++++--
.../AlignmentFromAssumptions/simple.ll | 13 +++++++++++++
.../CorrelatedValuePropagation/non-null.ll | 16 +++++++++++++++-
20 files changed, 69 insertions(+), 31 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 2e1389633d7e4..b3a272b3ccf45 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -1093,7 +1093,7 @@ template <class BaseCL> class MemSetBase : public BaseCL {
}
};
-/// This is the common base class for memset/memcpy/memmove.
+/// This is the common base class for memset(.pattern)/memcpy/memmove.
class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
enum { ARG_VOLATILE = 3 };
@@ -1125,6 +1125,7 @@ class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
case Intrinsic::memset:
case Intrinsic::memset_inline:
case Intrinsic::memcpy_inline:
+ case Intrinsic::experimental_memset_pattern:
return true;
default:
return false;
@@ -1157,18 +1158,7 @@ class MemSetInst : public MemSetBase<MemIntrinsic> {
/// Note that despite the inheritance, this is not part of the
/// MemIntrinsic hierachy in terms of isa/cast.
class MemSetPatternInst : public MemSetBase<MemIntrinsic> {
-private:
- enum { ARG_VOLATILE = 3 };
-
public:
- ConstantInt *getVolatileCst() const {
- return cast<ConstantInt>(getArgOperand(ARG_VOLATILE));
- }
-
- bool isVolatile() const { return !getVolatileCst()->isZero(); }
-
- void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }
-
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::experimental_memset_pattern;
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index 5e94e0bfe6738..343773dcfe9d3 100644
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -489,7 +489,8 @@ void StackSafetyLocalAnalysis::analyzeAllUses(Value *Ptr,
US.addRange(I, UnknownRange, /*IsSafe=*/false);
break;
}
- if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+ if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I);
+ MI && !isa<MemSetPatternInst>(I)) {
auto AccessRange = getMemIntrinsicAccessRange(MI, UI, Ptr);
bool Safe = false;
if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 260ce8fb7aae0..68aa9067d72c7 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -2071,7 +2071,8 @@ getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) {
// with it.
if (const auto *SI = dyn_cast<StoreInst>(&I))
return at::getAssignmentInfo(Layout, SI);
- if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
+ if (const auto *MI = dyn_cast<MemIntrinsic>(&I);
+ MI && !isa<MemSetPatternInst>(MI))
return at::getAssignmentInfo(Layout, MI);
// Alloca or non-store-like inst.
return std::nullopt;
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index 908ed96172615..684d53d8c621e 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -321,7 +321,8 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
if (I->isLifetimeStartOrEnd())
continue;
- if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+ if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I);
+ MI && !isa<MemSetPatternInst>(I)) {
if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
LLVM_DEBUG(dbgs()
<< "[SafeStack] Unsafe alloca: " << *AllocaPtr
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 5e2247f2a88d0..06720a8fea8a3 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -3200,7 +3200,7 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
}
// Check the pointer(s) of a memory intrinsic explicitly.
- if (isa<MemIntrinsic>(&I)) {
+ if (isa<MemIntrinsic>(&I) && !isa<MemSetPatternInst>(&I)) {
if (!ED.EncounteredNonLocalSideEffect &&
AA::isPotentiallyAffectedByBarrier(A, I, *this))
ED.EncounteredNonLocalSideEffect = true;
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 50258af5e26c3..f31b9c48963c9 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -3061,7 +3061,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
((ClInvalidPointerPairs || ClInvalidPointerSub) &&
isInterestingPointerSubtraction(&Inst))) {
PointerComparisonsOrSubtracts.push_back(&Inst);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
+ MI && !isa<MemSetPatternInst>(MI)) {
// ok, take it.
IntrinToInstrument.push_back(MI);
NumInsnsPerBB++;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index fc34d14259d1f..f4181e18671fc 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1602,7 +1602,8 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
+ MI && !isa<MemSetPatternInst>(MI))
if (!ignoreMemIntrinsic(ORE, MI))
IntrinToInstrument.push_back(MI);
}
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
index 3ae771a575f0f..d82522073b742 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
@@ -618,7 +618,8 @@ bool MemProfiler::instrumentFunction(Function &F) {
// Fill the set of memory operations to instrument.
for (auto &BB : F) {
for (auto &Inst : BB) {
- if (isInterestingMemoryAccess(&Inst) || isa<MemIntrinsic>(Inst))
+ if (isInterestingMemoryAccess(&Inst) ||
+ (isa<MemIntrinsic>(Inst) && !isa<MemSetPatternInst>(Inst)))
ToInstrument.push_back(&Inst);
}
}
diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
index d18c0d0d2d90d..15b1a3eb9dc77 100644
--- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
@@ -1962,7 +1962,8 @@ void NumericalStabilitySanitizer::propagateShadowValues(
maybeAddSuffixForNsanInterface(CB);
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
+ MI && !isa<MemSetPatternInst>(MI)) {
instrumentMemIntrinsic(MI);
return;
}
diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
index ce1d9f1923d05..7fe5126fe4263 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
@@ -199,6 +199,9 @@ class MemOPSizeOpt : public InstVisitor<MemOPSizeOpt> {
// Not perform on constant length calls.
if (isa<ConstantInt>(Length))
return;
+ if (isa<MemSetPatternInst>(MI))
+ return; // not supported
+
WorkList.push_back(MemOp(&MI));
}
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 0d48a350254ee..aed87b8be83bf 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -535,7 +535,7 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
- if (isa<MemIntrinsic>(Inst))
+ if (isa<MemIntrinsic>(Inst) && !isa<MemSetPatternInst>(Inst))
MemIntrinCalls.push_back(&Inst);
HasCalls = true;
chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
diff --git a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
index 9471ae3a6c4e9..d95c04b402d91 100644
--- a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
@@ -493,7 +493,8 @@ void collectMemAccessInfo(
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
- if (isa<MemIntrinsic, LifetimeIntrinsic>(Inst))
+ if (isa<MemIntrinsic, LifetimeIntrinsic>(Inst) &&
+ !isa<MemSetPatternInst>(Inst))
MemTypeResetInsts.push_back(&Inst);
} else if (isa<AllocaInst>(Inst)) {
MemTypeResetInsts.push_back(&Inst);
@@ -804,7 +805,8 @@ bool TypeSanitizer::instrumentMemInst(Value *V, Instruction *ShadowBase,
ConstantInt::get(IntptrTy, DL.getTypeAllocSize(A->getParamByValType()));
} else {
auto *I = cast<Instruction>(V);
- if (auto *MI = dyn_cast<MemIntrinsic>(I)) {
+ if (auto *MI = dyn_cast<MemIntrinsic>(I);
+ MI && !isa<MemSetPatternInst>(MI)) {
if (MI->getDestAddressSpace() != 0)
return false;
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index a3d4e5367b9ab..bea8472d39420 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -46,6 +46,9 @@ public:
if (isa<ConstantInt>(Length))
return;
+ if (isa<MemSetPatternInst>(MI))
+ return; // Not supported
+
Instruction *InsertPt = &MI;
Instruction *AnnotatedInst = &MI;
Candidates->emplace_back(CandidateInfo{Length, InsertPt, AnnotatedInst});
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 8093e44245d20..9195eec1e2920 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -622,6 +622,8 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
uint64_t &DeadSize, int64_t KillingStart,
uint64_t KillingSize, bool IsOverwriteEnd) {
auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
+ if (isa<MemSetPatternInst>(DeadI))
+ return false;
Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
// We assume that memet/memcpy operates in chunks of the "largest" native
@@ -1282,7 +1284,7 @@ struct DSEState {
if (auto *CB = dyn_cast<CallBase>(I)) {
// Don't remove volatile memory intrinsics.
if (auto *MI = dyn_cast<MemIntrinsic>(CB))
- return !MI->isVolatile();
+ return !MI->isVolatile() || !isa<MemSetPatternInst>(MI);
// Never remove dead lifetime intrinsics, e.g. because they are followed
// by a free.
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 7704e49c499da..92c4e39f9336b 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1378,9 +1378,9 @@ GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
}
// If the clobbering value is a memset/memcpy/memmove, see if we can
- // forward a value on from it.
+ // forward a value on from it. TODO: Support memset.pattern.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
- if (Address && !Load->isAtomic()) {
+ if (Address && !Load->isAtomic() && !isa<MemSetPatternInst>(DepInst)) {
int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
DepMI, DL);
if (Offset != -1)
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index a097d338a42ca..e0169fe7665d1 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1239,7 +1239,8 @@ void InferAddressSpacesImpl::performPointerReplacement(
return;
// Handle more complex cases like intrinsic that need to be remangled.
- if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
+ if (auto *MI = dyn_cast<MemIntrinsic>(CurUser);
+ MI && !isa<MemSetPatternInst>(MI)) {
if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
return;
}
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 9d4fb79416596..324b7dd75361b 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1521,7 +1521,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
return createConstantExpression(PossibleConstant);
}
}
- } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+ } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst);
+ DepMI && !isa<MemSetPatternInst>(DepMI)) {
int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
if (Offset >= 0) {
if (auto *PossibleConstant =
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index d6e27aa20730b..db3fef1224f44 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2126,7 +2126,8 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
Use *U = S.getUse();
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser());
+ MI && !isa<MemSetPatternInst>(MI)) {
if (MI->isVolatile())
return false;
if (!S.isSplittable())
@@ -2490,7 +2491,8 @@ static bool isIntegerWideningViableForSlice(const Slice &S,
// they are promotable.
return false;
}
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser());
+ MI && !isa<MemSetPatternInst>(MI)) {
if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
return false;
if (!S.isSplittable())
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
index c2f2d21fafc40..f6ae17b70e669 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
@@ -365,6 +365,19 @@ entry:
ret i32 0
}
+define i32 @moo4(ptr nocapture %a) {
+; CHECK-LABEL: define i32 @moo4
+; CHECK-SAME: (ptr captures(none) [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: tail call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i16 32) ]
+; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.i32.i64(ptr align 32 [[A]], i32 257, i64 10, i1 false)
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ tail call void @llvm.assume(i1 true) ["align"(ptr %a, i16 32)]
+ call void @llvm.experimental.memset.pattern(ptr align 4 %a, i32 257, i64 10, i1 false)
+ ret i32 0
+}
; Variable alignments appear to be legal, don't crash
define i32 @pr51680(ptr nocapture %a, i32 %align) {
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
index 53a94e13a1763..4ac31a7ff45ff 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
@@ -309,7 +309,7 @@ define void @test12(ptr %arg1, ptr %arg2) {
; CHECK: non_null:
; CHECK-NEXT: br label [[MERGE:%.*]]
; CHECK: null:
-; CHECK-NEXT: [[ANOTHER_ARG:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8, !nonnull !0
+; CHECK-NEXT: [[ANOTHER_ARG:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8, !nonnull [[META0:![0-9]+]]
; CHECK-NEXT: br label [[MERGE]]
; CHECK: merge:
; CHECK-NEXT: [[MERGED_ARG:%.*]] = phi ptr [ [[ANOTHER_ARG]], [[NULL]] ], [ [[ARG1]], [[NON_NULL]] ]
@@ -445,4 +445,18 @@ entry:
declare void @callee(ptr)
declare void @callee2(ptr noundef)
+define void @test_memset_pattern(ptr %dest) {
+; CHECK-LABEL: @test_memset_pattern(
+; CHECK-NEXT: call void @llvm.experimental.memset.pattern.p0.i32.i64(ptr [[DEST:%.*]], i32 257, i64 17, i1 false)
+; CHECK-NEXT: br label [[BB:%.*]]
+; CHECK: bb:
+; CHECK-NEXT: ret void
+;
+ call void @llvm.experimental.memset.pattern(ptr %dest, i32 257, i64 17, i1 false)
+ br label %bb
+bb:
+ icmp ne ptr %dest, null
+ ret void
+}
+
attributes #0 = { null_pointer_is_valid }
>From 399a28c8c71e84f101daf8ec222b06f824677f6e Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 15 Aug 2025 07:59:00 -0700
Subject: [PATCH 2/2] Address review comment
---
llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp | 2 ++
llvm/lib/CodeGen/SafeStack.cpp | 2 ++
llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 2 ++
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2 ++
llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 2 ++
.../lib/Transforms/Instrumentation/MemProfInstrumentation.cpp | 2 ++
.../Instrumentation/NumericalStabilitySanitizer.cpp | 2 ++
llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp | 2 ++
llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 2 ++
llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp | 4 ++++
llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc | 2 ++
llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 4 ++++
llvm/lib/Transforms/Scalar/GVN.cpp | 2 ++
llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp | 2 ++
llvm/lib/Transforms/Scalar/NewGVN.cpp | 2 ++
llvm/lib/Transforms/Scalar/SROA.cpp | 4 ++++
16 files changed, 38 insertions(+)
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 68aa9067d72c7..215f051457e35 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -2071,6 +2071,8 @@ getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) {
// with it.
if (const auto *SI = dyn_cast<StoreInst>(&I))
return at::getAssignmentInfo(Layout, SI);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (const auto *MI = dyn_cast<MemIntrinsic>(&I);
MI && !isa<MemSetPatternInst>(MI))
return at::getAssignmentInfo(Layout, MI);
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index 684d53d8c621e..28e37b3686170 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -321,6 +321,8 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
if (I->isLifetimeStartOrEnd())
continue;
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I);
MI && !isa<MemSetPatternInst>(I)) {
if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 06720a8fea8a3..9b78fa03889a7 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -3200,6 +3200,8 @@ ChangeStatus AAExecutionDomainFunction::updateImpl(Attributor &A) {
}
// Check the pointer(s) of a memory intrinsic explicitly.
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemIntrinsic>(&I) && !isa<MemSetPatternInst>(&I)) {
if (!ED.EncounteredNonLocalSideEffect &&
AA::isPotentiallyAffectedByBarrier(A, I, *this))
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index f31b9c48963c9..0eb3778ec68c8 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -3063,6 +3063,8 @@ bool AddressSanitizer::instrumentFunction(Function &F,
PointerComparisonsOrSubtracts.push_back(&Inst);
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
MI && !isa<MemSetPatternInst>(MI)) {
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
// ok, take it.
IntrinToInstrument.push_back(MI);
NumInsnsPerBB++;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index f4181e18671fc..48a9e578993b8 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1602,6 +1602,8 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
getInterestingMemoryOperands(ORE, &Inst, TLI, OperandsToInstrument);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
MI && !isa<MemSetPatternInst>(MI))
if (!ignoreMemIntrinsic(ORE, MI))
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
index d82522073b742..45b30b3b0c561 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfInstrumentation.cpp
@@ -618,6 +618,8 @@ bool MemProfiler::instrumentFunction(Function &F) {
// Fill the set of memory operations to instrument.
for (auto &BB : F) {
for (auto &Inst : BB) {
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isInterestingMemoryAccess(&Inst) ||
(isa<MemIntrinsic>(Inst) && !isa<MemSetPatternInst>(Inst)))
ToInstrument.push_back(&Inst);
diff --git a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
index 15b1a3eb9dc77..dde424e074e9f 100644
--- a/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/NumericalStabilitySanitizer.cpp
@@ -1962,6 +1962,8 @@ void NumericalStabilitySanitizer::propagateShadowValues(
maybeAddSuffixForNsanInterface(CB);
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst);
MI && !isa<MemSetPatternInst>(MI)) {
instrumentMemIntrinsic(MI);
diff --git a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
index 7fe5126fe4263..d97d1a50543a1 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp
@@ -199,6 +199,8 @@ class MemOPSizeOpt : public InstVisitor<MemOPSizeOpt> {
// Not perform on constant length calls.
if (isa<ConstantInt>(Length))
return;
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemSetPatternInst>(MI))
return; // not supported
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index aed87b8be83bf..c3596447af2ee 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -535,6 +535,8 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemIntrinsic>(Inst) && !isa<MemSetPatternInst>(Inst))
MemIntrinCalls.push_back(&Inst);
HasCalls = true;
diff --git a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
index d95c04b402d91..8a5e592439306 100644
--- a/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/TypeSanitizer.cpp
@@ -493,6 +493,8 @@ void collectMemAccessInfo(
if (CallInst *CI = dyn_cast<CallInst>(&Inst))
maybeMarkSanitizerLibraryCallNoBuiltin(CI, &TLI);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemIntrinsic, LifetimeIntrinsic>(Inst) &&
!isa<MemSetPatternInst>(Inst))
MemTypeResetInsts.push_back(&Inst);
@@ -805,6 +807,8 @@ bool TypeSanitizer::instrumentMemInst(Value *V, Instruction *ShadowBase,
ConstantInt::get(IntptrTy, DL.getTypeAllocSize(A->getParamByValType()));
} else {
auto *I = cast<Instruction>(V);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (auto *MI = dyn_cast<MemIntrinsic>(I);
MI && !isa<MemSetPatternInst>(MI)) {
if (MI->getDestAddressSpace() != 0)
diff --git a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
index bea8472d39420..5039bf32a80b7 100644
--- a/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
+++ b/llvm/lib/Transforms/Instrumentation/ValueProfilePlugins.inc
@@ -46,6 +46,8 @@ public:
if (isa<ConstantInt>(Length))
return;
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemSetPatternInst>(MI))
return; // Not supported
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 9195eec1e2920..f6edc13c41fd8 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -622,6 +622,8 @@ static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
uint64_t &DeadSize, int64_t KillingStart,
uint64_t KillingSize, bool IsOverwriteEnd) {
auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (isa<MemSetPatternInst>(DeadI))
return false;
Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
@@ -1283,6 +1285,8 @@ struct DSEState {
if (auto *CB = dyn_cast<CallBase>(I)) {
// Don't remove volatile memory intrinsics.
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (auto *MI = dyn_cast<MemIntrinsic>(CB))
return !MI->isVolatile() || !isa<MemSetPatternInst>(MI);
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index 92c4e39f9336b..a8636b6564c04 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1380,6 +1380,8 @@ GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it. TODO: Support memset.pattern.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (Address && !Load->isAtomic() && !isa<MemSetPatternInst>(DepInst)) {
int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
DepMI, DL);
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index e0169fe7665d1..eb6daf67c5fcc 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1239,6 +1239,8 @@ void InferAddressSpacesImpl::performPointerReplacement(
return;
// Handle more complex cases like intrinsic that need to be remangled.
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (auto *MI = dyn_cast<MemIntrinsic>(CurUser);
MI && !isa<MemSetPatternInst>(MI)) {
if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
index 324b7dd75361b..0452097e96c6a 100644
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1523,6 +1523,8 @@ NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
}
} else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst);
DepMI && !isa<MemSetPatternInst>(DepMI)) {
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
if (Offset >= 0) {
if (auto *PossibleConstant =
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index db3fef1224f44..35d569d712619 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2126,6 +2126,8 @@ static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
Use *U = S.getUse();
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser());
MI && !isa<MemSetPatternInst>(MI)) {
if (MI->isVolatile())
@@ -2493,6 +2495,8 @@ static bool isIntegerWideningViableForSlice(const Slice &S,
}
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser());
MI && !isa<MemSetPatternInst>(MI)) {
+ // TODO: This code was written before memset.pattern was added to
+ // MemIntrinsic, consider how to update it
if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
return false;
if (!S.isSplittable())