[llvm] [DebugInfo] Handle additional types of stores in assignment tracking (PR #129070)
Stephen Tozer via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 7 04:40:27 PST 2025
https://github.com/SLTozer updated https://github.com/llvm/llvm-project/pull/129070
From 208e53587c70d142b147b869c9a25c68bc7710a7 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Tue, 25 Feb 2025 16:34:03 +0000
Subject: [PATCH 1/6] Handle non-contiguous stores in assignment tracking
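As a minimal sketch of the kind of store this series is concerned with (placeholder IR, not taken from the patch; %val, %mask and !1 are hypothetical): a masked store may write only the lanes enabled by its mask, so the assigned bytes cannot be described by the single (offset, size) fragment that assignment tracking derives for a plain store.

  %buf = alloca [4 x i32], align 16, !DIAssignID !1
  ;; Only the lanes enabled by %mask are written, so the stored extent is
  ;; not known statically, unlike a scalar store i32 to %buf.
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %val, ptr %buf, i32 16, <4 x i1> %mask)

Such stores are instead handled conservatively, as the changes below do.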
---
llvm/include/llvm/IR/DebugInfo.h | 2 +
.../CodeGen/AssignmentTrackingAnalysis.cpp | 114 +++++++++++++++++-
llvm/lib/IR/DebugInfo.cpp | 18 +++
llvm/lib/IR/Verifier.cpp | 15 ++-
4 files changed, 143 insertions(+), 6 deletions(-)
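A rough sketch of the store shapes getNonContiguousStore is intended to recognise (hypothetical IR, not part of the patch; %a, %p, %v and %m are placeholders): stores through a pointer that strips back to a single alloca.

  %a = alloca [8 x i32], align 32
  %p = getelementptr i8, ptr %a, i64 16
  ;; Both of these write some unknown subset of the bytes at %p.
  call void @llvm.masked.store.v4i32.p0(<4 x i32> %v, ptr %p, i32 4, <4 x i1> %m)
  call void @llvm.experimental.vp.strided.store.v4i32.p0.i64(<4 x i32> %v, ptr align 4 %p, i64 8, <4 x i1> %m, i32 4)

The scatter variants take a vector of pointers as the second operand, which, as the comment in getNonContiguousStore notes, is why the offset-accumulating strip is used rather than the non-accumulating variants.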
diff --git a/llvm/include/llvm/IR/DebugInfo.h b/llvm/include/llvm/IR/DebugInfo.h
index 73f45c3769be4..a572c277b19b6 100644
--- a/llvm/include/llvm/IR/DebugInfo.h
+++ b/llvm/include/llvm/IR/DebugInfo.h
@@ -355,6 +355,8 @@ std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
const StoreInst *SI);
std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
const AllocaInst *AI);
+std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
+ const VPIntrinsic *VPI);
} // end namespace at
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index dbc724629d3be..f16e6a75d7394 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -1103,6 +1103,8 @@ class AssignmentTrackingLowering {
using UntaggedStoreAssignmentMap =
DenseMap<const Instruction *,
SmallVector<std::pair<VariableID, at::AssignmentInfo>>>;
+ using NonContiguousStoreAssignmentMap =
+ DenseMap<const Instruction *, SmallVector<VariableID>>;
private:
/// The highest numbered VariableID for partially promoted variables plus 1,
@@ -1113,6 +1115,9 @@ class AssignmentTrackingLowering {
/// Map untagged stores to the variable fragments they assign to. Used by
/// processUntaggedInstruction.
UntaggedStoreAssignmentMap UntaggedStoreVars;
+ /// Map untagged non-contiguous stores (e.g. strided/masked store intrinsics)
+ /// to the variables they may assign to. Used by processUntaggedInstruction.
+ NonContiguousStoreAssignmentMap NonContiguousStoreVars;
// Machinery to defer inserting dbg.values.
using InstInsertMap = MapVector<VarLocInsertPt, SmallVector<VarLocInfo>>;
@@ -1355,6 +1360,8 @@ class AssignmentTrackingLowering {
/// Update \p LiveSet after encountering an instruction without a DIAssignID
/// attachment, \p I.
void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet);
+ void processNonContiguousStoreToVariable(Instruction &I, VariableID &Var,
+ BlockInfo *LiveSet);
void processDbgAssign(AssignRecord Assign, BlockInfo *LiveSet);
void processDbgVariableRecord(DbgVariableRecord &DVR, BlockInfo *LiveSet);
void processDbgValue(
@@ -1604,6 +1611,45 @@ void AssignmentTrackingLowering::processNonDbgInstruction(
processUntaggedInstruction(I, LiveSet);
}
+void AssignmentTrackingLowering::processNonContiguousStoreToVariable(
+ Instruction &I, VariableID &Var, BlockInfo *LiveSet) {
+ // We may have assigned to some unknown fragment of the variable, so
+ // treat the memory assignment as unknown for now.
+ addMemDef(LiveSet, Var, Assignment::makeNoneOrPhi());
+ // If we weren't already using a memory location, we don't need to do
+ // anything more.
+ if (getLocKind(LiveSet, Var) != LocKind::Mem)
+ return;
+ // If there is a live debug value for this variable, fall back to using
+ // that
+ Assignment DbgAV = LiveSet->getAssignment(BlockInfo::Debug, Var);
+ if (DbgAV.Status != Assignment::NoneOrPhi && DbgAV.Source) {
+ LLVM_DEBUG(dbgs() << "Switching to fallback debug value: ";
+ DbgAV.dump(dbgs()); dbgs() << "\n");
+ setLocKind(LiveSet, Var, LocKind::Val);
+ emitDbgValue(LocKind::Val, DbgAV.Source, &I);
+ return;
+ }
+ // Otherwise, find a suitable insert point, before the next instruction or
+ // DbgRecord after I.
+ auto InsertBefore = getNextNode(&I);
+ assert(InsertBefore && "Shouldn't be inserting after a terminator");
+
+ // Get DILocation for this unrecorded assignment.
+ DebugVariable V = FnVarLocs->getVariable(Var);
+ DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
+ const DILocation *DILoc = DILocation::get(
+ Fn.getContext(), 0, 0, V.getVariable()->getScope(), InlinedAt);
+
+ VarLocInfo VarLoc;
+ VarLoc.VariableID = static_cast<VariableID>(Var);
+ VarLoc.Expr = DIExpression::get(I.getContext(), {});
+ VarLoc.Values = RawLocationWrapper(
+ ValueAsMetadata::get(PoisonValue::get(Type::getInt1Ty(I.getContext()))));
+ VarLoc.DL = DILoc;
+ InsertBeforeMap[InsertBefore].push_back(VarLoc);
+}
+
void AssignmentTrackingLowering::processUntaggedInstruction(
Instruction &I, AssignmentTrackingLowering::BlockInfo *LiveSet) {
// Interpret stack stores that are not tagged as an assignment in memory for
@@ -1619,8 +1665,23 @@ void AssignmentTrackingLowering::processUntaggedInstruction(
// "early", for example.
assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
auto It = UntaggedStoreVars.find(&I);
- if (It == UntaggedStoreVars.end())
+ if (It == UntaggedStoreVars.end()) {
+ // It is possible that we have an untagged non-contiguous store, which we do
+ // not currently support - in this case we should undef the stack location
+ // of the variable, as if we had a tagged store that did not match the
+ // current assignment.
+ // FIXME: It should be possible to support non-contiguous stores, but it
+ // would require more extensive changes to our representation of assignments
+ // which assumes a single offset+size.
+ if (auto UnhandledStoreIt = NonContiguousStoreVars.find(&I);
+ UnhandledStoreIt != NonContiguousStoreVars.end()) {
+ LLVM_DEBUG(dbgs() << "Processing untagged non-contiguous store " << I
+ << "\n");
+ for (auto &Var : UnhandledStoreIt->second)
+ processNonContiguousStoreToVariable(I, Var, LiveSet);
+ }
return; // No variables associated with the store destination.
+ }
LLVM_DEBUG(dbgs() << "processUntaggedInstruction on UNTAGGED INST " << I
<< "\n");
@@ -2119,10 +2180,32 @@ getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) {
return at::getAssignmentInfo(Layout, SI);
if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
return at::getAssignmentInfo(Layout, MI);
+ if (const auto *VPI = dyn_cast<VPIntrinsic>(&I))
+ return at::getAssignmentInfo(Layout, VPI);
// Alloca or non-store-like inst.
return std::nullopt;
}
+AllocaInst *getNonContiguousStore(const Instruction &I,
+ const DataLayout &Layout) {
+ auto *II = dyn_cast<IntrinsicInst>(&I);
+ if (!II)
+ return nullptr;
+ Intrinsic::ID ID = II->getIntrinsicID();
+ if (ID != Intrinsic::experimental_vp_strided_store &&
+ ID != Intrinsic::masked_store && ID != Intrinsic::vp_scatter &&
+ ID != Intrinsic::masked_scatter)
+ return nullptr;
+ Value *MemOp = II->getArgOperand(1);
+ // We don't actually use the constant offsets for now, but we may in future,
+ // and the non-accumulating versions do not support a vector of pointers.
+ APInt Offset(Layout.getIndexTypeSizeInBits(MemOp->getType()), 0);
+ Value *Base = MemOp->stripAndAccumulateConstantOffsets(Layout, Offset, true);
+ // For Base pointers that are not a single alloca value we don't need to do
+ // anything, and simply return nullptr.
+ return dyn_cast<AllocaInst>(Base);
+}
+
DbgDeclareInst *DynCastToDbgDeclare(DbgVariableIntrinsic *DVI) {
return dyn_cast<DbgDeclareInst>(DVI);
}
@@ -2145,7 +2228,8 @@ DbgVariableRecord *DynCastToDbgDeclare(DbgVariableRecord *DVR) {
/// subsequent variables are either stack homed or fully promoted.
///
/// Finally, populate UntaggedStoreVars with a mapping of untagged stores to
-/// the stored-to variable fragments.
+/// the stored-to variable fragments, and NonContiguousStoreVars with a mapping
+/// of untagged non-contiguous stores to the stored-to variable aggregates.
///
/// These tasks are bundled together to reduce the number of times we need
/// to iterate over the function as they can be achieved together in one pass.
@@ -2153,6 +2237,8 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
Function &Fn, FunctionVarLocsBuilder *FnVarLocs,
const DenseSet<DebugAggregate> &VarsWithStackSlot,
AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars,
+ AssignmentTrackingLowering::NonContiguousStoreAssignmentMap
+ &NonContiguousStoreVars,
unsigned &TrackedVariablesVectorSize) {
DenseSet<DebugVariable> Seen;
// Map of Variable: [Fragments].
@@ -2161,7 +2247,8 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
// - dbg.declare -> add single location variable record
// - dbg.* -> Add fragments to FragmentMap
// - untagged store -> Add fragments to FragmentMap and update
- // UntaggedStoreVars.
+ // UntaggedStoreVars, or add to NonContiguousStoreVars if
+ // we can't determine the fragment overlap.
// We need to add fragments for untagged stores too so that we can correctly
// clobber overlapped fragment locations later.
SmallVector<DbgDeclareInst *> InstDeclares;
@@ -2224,6 +2311,25 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
HandleDbgAssignForStore(DAI);
for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Info->Base))
HandleDbgAssignForStore(DVR);
+ } else if (auto *AI = getNonContiguousStore(I, Fn.getDataLayout())) {
+ // Find markers linked to this alloca.
+ auto HandleDbgAssignForNonContiguousStore = [&](auto *Assign) {
+ // Because we can't currently represent the fragment info for this
+ // store, we treat it as an unusable store to the whole variable.
+ DebugVariable DV =
+ DebugVariable(Assign->getVariable(), std::nullopt,
+ Assign->getDebugLoc().getInlinedAt());
+ DebugAggregate DA = {DV.getVariable(), DV.getInlinedAt()};
+ if (!VarsWithStackSlot.contains(DA))
+ return;
+
+ // Cache this info for later.
+ NonContiguousStoreVars[&I].push_back(FnVarLocs->insertVariable(DV));
+ };
+ for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(AI))
+ HandleDbgAssignForNonContiguousStore(DAI);
+ for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(AI))
+ HandleDbgAssignForNonContiguousStore(DVR);
}
}
}
@@ -2299,7 +2405,7 @@ bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
// appears to be a rare occurrence.
VarContains = buildOverlapMapAndRecordDeclares(
Fn, FnVarLocs, *VarsWithStackSlot, UntaggedStoreVars,
- TrackedVariablesVectorSize);
+ NonContiguousStoreVars, TrackedVariablesVectorSize);
// Prepare for traversal.
ReversePostOrderTraversal<Function *> RPOT(&Fn);
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index cc36b71190ce2..185157f5b8eaa 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -2062,6 +2062,24 @@ std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
return getAssignmentInfoImpl(DL, AI, SizeInBits);
}
+std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
+ const VPIntrinsic *VPI) {
+ auto *Dest = VPI->getMemoryPointerParam();
+ if (!Dest)
+ return {};
+ // FIXME: It should be possible to deal with strided store and scatter
+ // intrinsics as well, but the existing AssignmentInfo structure cannot
+ // sufficiently describe them.
+ if (VPI->getIntrinsicID() == Intrinsic::experimental_vp_strided_store ||
+ VPI->getIntrinsicID() == Intrinsic::vp_scatter)
+ return {};
+ assert(VPI->getMemoryDataParam() &&
+ "Memory VPIntrinsic missing a data param?");
+ TypeSize SizeInBits =
+ DL.getTypeSizeInBits(VPI->getMemoryDataParam()->getType());
+ return getAssignmentInfoImpl(DL, Dest, SizeInBits);
+}
+
/// Returns nullptr if the assignment shouldn't be attributed to this variable.
static void emitDbgAssign(AssignmentInfo Info, Value *Val, Value *Dest,
Instruction &StoreLikeInst, const VarRecord &VarRec,
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index b4f9273fea9fb..7326efbf757e5 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4956,8 +4956,19 @@ void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
- bool ExpectedInstTy =
- isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<MemIntrinsic>(I);
+ // DIAssignID metadata must be attached to either an alloca or some form of
+ // store/memory-writing instruction.
+ // FIXME: Is there any simpler way to express this property than manually
+ // enumerating all instructions that could perform a store?
+ bool ExpectedInstTy = isa<AllocaInst>(I) || isa<StoreInst>(I);
+ if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+ const static Intrinsic::ID StoreIntrinsics[] = {
+ Intrinsic::vp_store, Intrinsic::vp_scatter,
+ Intrinsic::experimental_vp_strided_store, Intrinsic::masked_store,
+ Intrinsic::masked_scatter};
+ ExpectedInstTy |= is_contained(StoreIntrinsics, II->getIntrinsicID()) ||
+ isa<MemIntrinsic>(II);
+ }
CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
I, MD);
// Iterate over the MetadataAsValue uses of the DIAssignID - these should
From 5caeff084b388441e175d7f10ce50840f90ca3d2 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Thu, 27 Feb 2025 15:20:02 +0000
Subject: [PATCH 2/6] Add tests
---
.../RISCV/di-assignment-tracking-vector.ll | 65 +++++++++++++++++++
.../test/Verifier/diassignid-vector-stores.ll | 33 ++++++++++
2 files changed, 98 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
create mode 100644 llvm/test/Verifier/diassignid-vector-stores.ll
diff --git a/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
new file mode 100644
index 0000000000000..0be9fcd830fd9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
@@ -0,0 +1,65 @@
+; RUN: llc -mtriple=riscv64 < %s -o - | FileCheck %s --implicit-check-not=DEBUG_VALUE
+
+;; Verify that tagged and untagged non-contiguous stores are handled correctly
+;; by assignment tracking.
+;; * The store to "i" is untagged, and results in the memory location being
+;; dropped in favour of the debug value 1010 after the store.
+;; * The store to "j" is tagged with a corresponding dbg_assign, which allows
+;; us to keep using the memory location.
+
+; CHECK-LABEL: foo:
+; CHECK-NEXT: .Lfunc_begin0:
+; CHECK: # %bb.0
+; CHECK: addi a1, sp, 48
+; CHECK-NEXT: #DEBUG_VALUE: foo:i <- [DW_OP_deref] $x12
+; CHECK-NEXT: #DEBUG_VALUE: foo:j <- [DW_OP_deref] $x12
+; CHECK: vsse32.v
+; CHECK-NEXT: #DEBUG_VALUE: foo:i <- 1010
+; CHECK-NEXT: vsse32.v
+
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+define void @foo() #0 !dbg !5 {
+entry:
+ %i = alloca i64, align 8, !DIAssignID !6
+ %j = alloca i64, align 8, !DIAssignID !12
+ %sar_height.i = getelementptr i8, ptr %i, i64 24
+ store ptr %sar_height.i, ptr null, align 8
+ %vui.i = getelementptr i8, ptr %i, i64 44
+ %0 = load i32, ptr %vui.i, align 4
+ %sar_width.i = getelementptr i8, ptr %i, i64 20
+ %i_sar_width.i = getelementptr i8, ptr %i, i64 48
+ %j_sar_width.j = getelementptr i8, ptr %j, i64 48
+ #dbg_assign(i32 1010, !7, !DIExpression(), !6, ptr %i_sar_width.i, !DIExpression(), !9)
+ #dbg_assign(i32 2121, !17, !DIExpression(), !12, ptr %i_sar_width.i, !DIExpression(), !9)
+ %1 = load <2 x i32>, ptr %sar_width.i, align 4
+ call void @llvm.experimental.vp.strided.store.v2i32.p0.i64(<2 x i32> %1, ptr align 4 %i_sar_width.i, i64 -4, <2 x i1> splat (i1 true), i32 2)
+ call void @llvm.experimental.vp.strided.store.v2i32.p0.i64(<2 x i32> %1, ptr align 4 %j_sar_width.j, i64 -4, <2 x i1> splat (i1 true), i32 2), !DIAssignID !13
+ #dbg_assign(i32 1010, !7, !DIExpression(), !14, ptr %i_sar_width.i, !DIExpression(), !9)
+ #dbg_assign(i32 2121, !17, !DIExpression(), !13, ptr %i_sar_width.i, !DIExpression(), !9)
+ ret void
+}
+
+attributes #0 = { "target-features"="+64bit,+a,+c,+d,+f,+m,+relax,+v,+zaamo,+zalrsc,+zicsr,+zifencei,+zmmul,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-b,-e,-experimental-sdext,-experimental-sdtrig,-experimental-smctr,-experimental-ssctr,-experimental-svukte,-experimental-xqcia,-experimental-xqciac,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisls,-experimental-zalasr,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvkgs,-h,-sha,-shcounterenw,-shgatpa,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcsrind,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svadu,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscmove,-xmipslsp,-xsfcease,-xsfvcp,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zba,-zbb,-zbc,-zbkb,-zbkc,-zbkx,-zbs,-zca,-zcb,-zcd,-zce,-zcf,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl4096b,-zvl512b,-zvl65536b,-zvl8192b" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C11, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, producer: "clang version 21.0.0git")
+!1 = !DIFile(filename: "test.c", directory: "/")
+!2 = !{}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+!5 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: !1, file: !1, line: 1, scopeLine: 1, type: !10, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11)
+!6 = distinct !DIAssignID()
+!7 = !DILocalVariable(name: "i", scope: !5, file: !1, line: 7, type: !8)
+!8 = !DIBasicType(name: "int32_t", size: 32, encoding: DW_ATE_signed)
+!9 = !DILocation(line: 5, scope: !5)
+!10 = !DISubroutineType(types: !2)
+!11 = !{!7, !17}
+!12 = distinct !DIAssignID()
+!13 = distinct !DIAssignID()
+!14 = distinct !DIAssignID()
+!17 = !DILocalVariable(name: "j", scope: !5, file: !1, line: 7, type: !8)
diff --git a/llvm/test/Verifier/diassignid-vector-stores.ll b/llvm/test/Verifier/diassignid-vector-stores.ll
new file mode 100644
index 0000000000000..7fe1ee85e6c9d
--- /dev/null
+++ b/llvm/test/Verifier/diassignid-vector-stores.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-as -disable-output <%s 2>&1 | FileCheck %s --implicit-check-not="attached to unexpected instruction kind"
+;; Check that we allow vector store intrinsics to have !DIAssignID attachments,
+;; but we do not allow non-store intrinsics to have them.
+
+; CHECK: !DIAssignID attached to unexpected instruction kind
+; CHECK-NEXT: @llvm.vp.load.v2i8.p0
+
+define void @f() !dbg !5 {
+ call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr undef, <2 x i1> undef, i32 undef), !DIAssignID !6
+ call void @llvm.vp.scatter.v2i8.v2p0(<2 x i8> undef, <2 x ptr> undef, <2 x i1> undef, i32 undef), !DIAssignID !7
+ call void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8> undef, ptr undef, i64 undef, <2 x i1> undef, i32 undef), !DIAssignID !8
+ call void @llvm.masked.store.v2i8.p0(<2 x i8> undef, ptr undef, i32 1, <2 x i1> undef), !DIAssignID !9
+ call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> undef, <2 x ptr> undef, i32 1, <2 x i1> undef), !DIAssignID !10
+ %r = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr undef, <2 x i1> undef, i32 undef), !DIAssignID !11
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!llvm.dbg.cu = !{!1}
+
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = distinct !DICompileUnit(language: DW_LANG_Swift, producer: "clang",
+ file: !2, emissionKind: 2)
+!2 = !DIFile(filename: "path/to/file", directory: "/path/to/dir")
+!3 = !{null}
+!4 = !DISubroutineType(types: !3)
+!5 = distinct !DISubprogram(name: "f", scope: !2, file: !2, line: 1, type: !4, scopeLine: 2, unit: !1)
+!6 = distinct !DIAssignID()
+!7 = distinct !DIAssignID()
+!8 = distinct !DIAssignID()
+!9 = distinct !DIAssignID()
+!10 = distinct !DIAssignID()
+!11 = distinct !DIAssignID()
From 52b2a068797d6a6ca430b0925834ba73958b5405 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Thu, 27 Feb 2025 15:44:25 +0000
Subject: [PATCH 3/6] Don't try to get info from vp.stores; needs work for
alignment
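
For context, an illustrative fragment (not taken from the patch; %v, %p, %m and %evl are placeholder values): a vp.store writes only the lanes selected by its mask and explicit vector length operand, so the size of the stored value's type does not by itself describe the assigned bytes.

  ;; At most %evl lanes are written; the assigned extent is not simply the
  ;; size of the <2 x i8> operand.
  call void @llvm.vp.store.v2i8.p0(<2 x i8> %v, ptr %p, <2 x i1> %m, i32 %evl)

The dedicated getAssignmentInfo overload is therefore dropped below, and a later patch in the series routes vp.store through the unknown-store handling instead.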
---
llvm/include/llvm/IR/DebugInfo.h | 2 -
.../CodeGen/AssignmentTrackingAnalysis.cpp | 53 +++++++++----------
llvm/lib/IR/DebugInfo.cpp | 18 -------
3 files changed, 24 insertions(+), 49 deletions(-)
diff --git a/llvm/include/llvm/IR/DebugInfo.h b/llvm/include/llvm/IR/DebugInfo.h
index a572c277b19b6..73f45c3769be4 100644
--- a/llvm/include/llvm/IR/DebugInfo.h
+++ b/llvm/include/llvm/IR/DebugInfo.h
@@ -355,8 +355,6 @@ std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
const StoreInst *SI);
std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
const AllocaInst *AI);
-std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
- const VPIntrinsic *VPI);
} // end namespace at
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index f16e6a75d7394..f74a014a0bafd 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -1103,7 +1103,7 @@ class AssignmentTrackingLowering {
using UntaggedStoreAssignmentMap =
DenseMap<const Instruction *,
SmallVector<std::pair<VariableID, at::AssignmentInfo>>>;
- using NonContiguousStoreAssignmentMap =
+ using UnknownStoreAssignmentMap =
DenseMap<const Instruction *, SmallVector<VariableID>>;
private:
@@ -1115,9 +1115,9 @@ class AssignmentTrackingLowering {
/// Map untagged stores to the variable fragments they assign to. Used by
/// processUntaggedInstruction.
UntaggedStoreAssignmentMap UntaggedStoreVars;
- /// Map untagged non-contiguous stores (e.g. strided/masked store intrinsics)
+ /// Map untagged unknown stores (e.g. strided/masked store intrinsics)
/// to the variables they may assign to. Used by processUntaggedInstruction.
- NonContiguousStoreAssignmentMap NonContiguousStoreVars;
+ UnknownStoreAssignmentMap UnknownStoreVars;
// Machinery to defer inserting dbg.values.
using InstInsertMap = MapVector<VarLocInsertPt, SmallVector<VarLocInfo>>;
@@ -1360,8 +1360,8 @@ class AssignmentTrackingLowering {
/// Update \p LiveSet after encountering an instruction without a DIAssignID
/// attachment, \p I.
void processUntaggedInstruction(Instruction &I, BlockInfo *LiveSet);
- void processNonContiguousStoreToVariable(Instruction &I, VariableID &Var,
- BlockInfo *LiveSet);
+ void processUnknownStoreToVariable(Instruction &I, VariableID &Var,
+ BlockInfo *LiveSet);
void processDbgAssign(AssignRecord Assign, BlockInfo *LiveSet);
void processDbgVariableRecord(DbgVariableRecord &DVR, BlockInfo *LiveSet);
void processDbgValue(
@@ -1611,7 +1611,7 @@ void AssignmentTrackingLowering::processNonDbgInstruction(
processUntaggedInstruction(I, LiveSet);
}
-void AssignmentTrackingLowering::processNonContiguousStoreToVariable(
+void AssignmentTrackingLowering::processUnknownStoreToVariable(
Instruction &I, VariableID &Var, BlockInfo *LiveSet) {
// We may have assigned to some unknown fragment of the variable, so
// treat the memory assignment as unknown for now.
@@ -1666,19 +1666,18 @@ void AssignmentTrackingLowering::processUntaggedInstruction(
assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
auto It = UntaggedStoreVars.find(&I);
if (It == UntaggedStoreVars.end()) {
- // It is possible that we have an untagged non-contiguous store, which we do
+ // It is possible that we have an untagged unknown store, which we do
// not currently support - in this case we should undef the stack location
// of the variable, as if we had a tagged store that did not match the
// current assignment.
- // FIXME: It should be possible to support non-contiguous stores, but it
+ // FIXME: It should be possible to support unknown stores, but it
// would require more extensive changes to our representation of assignments
// which assumes a single offset+size.
- if (auto UnhandledStoreIt = NonContiguousStoreVars.find(&I);
- UnhandledStoreIt != NonContiguousStoreVars.end()) {
- LLVM_DEBUG(dbgs() << "Processing untagged non-contiguous store " << I
- << "\n");
+ if (auto UnhandledStoreIt = UnknownStoreVars.find(&I);
+ UnhandledStoreIt != UnknownStoreVars.end()) {
+ LLVM_DEBUG(dbgs() << "Processing untagged unknown store " << I << "\n");
for (auto &Var : UnhandledStoreIt->second)
- processNonContiguousStoreToVariable(I, Var, LiveSet);
+ processUnknownStoreToVariable(I, Var, LiveSet);
}
return; // No variables associated with the store destination.
}
@@ -2180,14 +2179,11 @@ getUntaggedStoreAssignmentInfo(const Instruction &I, const DataLayout &Layout) {
return at::getAssignmentInfo(Layout, SI);
if (const auto *MI = dyn_cast<MemIntrinsic>(&I))
return at::getAssignmentInfo(Layout, MI);
- if (const auto *VPI = dyn_cast<VPIntrinsic>(&I))
- return at::getAssignmentInfo(Layout, VPI);
// Alloca or non-store-like inst.
return std::nullopt;
}
-AllocaInst *getNonContiguousStore(const Instruction &I,
- const DataLayout &Layout) {
+AllocaInst *getUnknownStore(const Instruction &I, const DataLayout &Layout) {
auto *II = dyn_cast<IntrinsicInst>(&I);
if (!II)
return nullptr;
@@ -2228,8 +2224,8 @@ DbgVariableRecord *DynCastToDbgDeclare(DbgVariableRecord *DVR) {
/// subsequent variables are either stack homed or fully promoted.
///
/// Finally, populate UntaggedStoreVars with a mapping of untagged stores to
-/// the stored-to variable fragments, and NonContiguousStoreVars with a mapping
-/// of untagged non-contiguous stores to the stored-to variable aggregates.
+/// the stored-to variable fragments, and UnknownStoreVars with a mapping
+/// of untagged unknown stores to the stored-to variable aggregates.
///
/// These tasks are bundled together to reduce the number of times we need
/// to iterate over the function as they can be achieved together in one pass.
@@ -2237,8 +2233,7 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
Function &Fn, FunctionVarLocsBuilder *FnVarLocs,
const DenseSet<DebugAggregate> &VarsWithStackSlot,
AssignmentTrackingLowering::UntaggedStoreAssignmentMap &UntaggedStoreVars,
- AssignmentTrackingLowering::NonContiguousStoreAssignmentMap
- &NonContiguousStoreVars,
+ AssignmentTrackingLowering::UnknownStoreAssignmentMap &UnknownStoreVars,
unsigned &TrackedVariablesVectorSize) {
DenseSet<DebugVariable> Seen;
// Map of Variable: [Fragments].
@@ -2247,7 +2242,7 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
// - dbg.declare -> add single location variable record
// - dbg.* -> Add fragments to FragmentMap
// - untagged store -> Add fragments to FragmentMap and update
- // UntaggedStoreVars, or add to NonContiguousStoreVars if
+ // UntaggedStoreVars, or add to UnknownStoreVars if
// we can't determine the fragment overlap.
// We need to add fragments for untagged stores too so that we can correctly
// clobber overlapped fragment locations later.
@@ -2311,9 +2306,9 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
HandleDbgAssignForStore(DAI);
for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(Info->Base))
HandleDbgAssignForStore(DVR);
- } else if (auto *AI = getNonContiguousStore(I, Fn.getDataLayout())) {
+ } else if (auto *AI = getUnknownStore(I, Fn.getDataLayout())) {
// Find markers linked to this alloca.
- auto HandleDbgAssignForNonContiguousStore = [&](auto *Assign) {
+ auto HandleDbgAssignForUnknownStore = [&](auto *Assign) {
// Because we can't currently represent the fragment info for this
// store, we treat it as an unusable store to the whole variable.
DebugVariable DV =
@@ -2324,12 +2319,12 @@ static AssignmentTrackingLowering::OverlapMap buildOverlapMapAndRecordDeclares(
return;
// Cache this info for later.
- NonContiguousStoreVars[&I].push_back(FnVarLocs->insertVariable(DV));
+ UnknownStoreVars[&I].push_back(FnVarLocs->insertVariable(DV));
};
for (DbgAssignIntrinsic *DAI : at::getAssignmentMarkers(AI))
- HandleDbgAssignForNonContiguousStore(DAI);
+ HandleDbgAssignForUnknownStore(DAI);
for (DbgVariableRecord *DVR : at::getDVRAssignmentMarkers(AI))
- HandleDbgAssignForNonContiguousStore(DVR);
+ HandleDbgAssignForUnknownStore(DVR);
}
}
}
@@ -2404,8 +2399,8 @@ bool AssignmentTrackingLowering::run(FunctionVarLocsBuilder *FnVarLocsBuilder) {
// neither does LiveDebugVariables) because that is difficult to do and
// appears to be a rare occurrence.
VarContains = buildOverlapMapAndRecordDeclares(
- Fn, FnVarLocs, *VarsWithStackSlot, UntaggedStoreVars,
- NonContiguousStoreVars, TrackedVariablesVectorSize);
+ Fn, FnVarLocs, *VarsWithStackSlot, UntaggedStoreVars, UnknownStoreVars,
+ TrackedVariablesVectorSize);
// Prepare for traversal.
ReversePostOrderTraversal<Function *> RPOT(&Fn);
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index 185157f5b8eaa..cc36b71190ce2 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -2062,24 +2062,6 @@ std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
return getAssignmentInfoImpl(DL, AI, SizeInBits);
}
-std::optional<AssignmentInfo> at::getAssignmentInfo(const DataLayout &DL,
- const VPIntrinsic *VPI) {
- auto *Dest = VPI->getMemoryPointerParam();
- if (!Dest)
- return {};
- // FIXME: It should be possible to deal with strided store and scatter
- // intrinsics as well, but the existing AssignmentInfo structure cannot
- // sufficiently describe them.
- if (VPI->getIntrinsicID() == Intrinsic::experimental_vp_strided_store ||
- VPI->getIntrinsicID() == Intrinsic::vp_scatter)
- return {};
- assert(VPI->getMemoryDataParam() &&
- "Memory VPIntrinsic missing a data param?");
- TypeSize SizeInBits =
- DL.getTypeSizeInBits(VPI->getMemoryDataParam()->getType());
- return getAssignmentInfoImpl(DL, Dest, SizeInBits);
-}
-
/// Returns nullptr if the assignment shouldn't be attributed to this variable.
static void emitDbgAssign(AssignmentInfo Info, Value *Val, Value *Dest,
Instruction &StoreLikeInst, const VarRecord &VarRec,
From e0ba7e2a3c5c90ae33965ca415408d2dcab61a13 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Thu, 27 Feb 2025 15:48:06 +0000
Subject: [PATCH 4/6] Slight comment update
---
llvm/lib/IR/Verifier.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 7326efbf757e5..fa76ee90b83dd 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4959,7 +4959,7 @@ void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
// DIAssignID metadata must be attached to either an alloca or some form of
// store/memory-writing instruction.
// FIXME: Is there any simpler way to express this property than manually
- // enumerating all instructions that could perform a store?
+ // enumerating all instructions that could perform an assignment?
bool ExpectedInstTy = isa<AllocaInst>(I) || isa<StoreInst>(I);
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
const static Intrinsic::ID StoreIntrinsics[] = {
From fae7510389705f36e6171591939361599eca635a Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Thu, 27 Feb 2025 15:59:07 +0000
Subject: [PATCH 5/6] Fixup some editing and test mistakes
---
.../CodeGen/AssignmentTrackingAnalysis.cpp | 24 +++++++++----------
.../RISCV/di-assignment-tracking-vector.ll | 2 +-
.../test/Verifier/diassignid-vector-stores.ll | 12 +++++-----
3 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index f74a014a0bafd..227140aa40a01 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -23,6 +23,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PrintPasses.h"
@@ -1621,7 +1622,7 @@ void AssignmentTrackingLowering::processUnknownStoreToVariable(
if (getLocKind(LiveSet, Var) != LocKind::Mem)
return;
// If there is a live debug value for this variable, fall back to using
- // that
+ // that.
Assignment DbgAV = LiveSet->getAssignment(BlockInfo::Debug, Var);
if (DbgAV.Status != Assignment::NoneOrPhi && DbgAV.Source) {
LLVM_DEBUG(dbgs() << "Switching to fallback debug value: ";
@@ -1635,7 +1636,7 @@ void AssignmentTrackingLowering::processUnknownStoreToVariable(
auto InsertBefore = getNextNode(&I);
assert(InsertBefore && "Shouldn't be inserting after a terminator");
- // Get DILocation for this unrecorded assignment.
+ // Get DILocation for this assignment.
DebugVariable V = FnVarLocs->getVariable(Var);
DILocation *InlinedAt = const_cast<DILocation *>(V.getInlinedAt());
const DILocation *DILoc = DILocation::get(
@@ -1666,13 +1667,12 @@ void AssignmentTrackingLowering::processUntaggedInstruction(
assert(!I.hasMetadata(LLVMContext::MD_DIAssignID));
auto It = UntaggedStoreVars.find(&I);
if (It == UntaggedStoreVars.end()) {
- // It is possible that we have an untagged unknown store, which we do
- // not currently support - in this case we should undef the stack location
- // of the variable, as if we had a tagged store that did not match the
- // current assignment.
- // FIXME: It should be possible to support unknown stores, but it
- // would require more extensive changes to our representation of assignments
- // which assumes a single offset+size.
+ // It is possible that we have an untagged unknown store, i.e. one that
+ // cannot be represented as a simple (base, offset, size) - in this case we
+ // should undef the memory location of the variable, as if we had a tagged
+ // store that did not match the current assignment.
+ // FIXME: It should be possible to support these stores, but it would
+ // require more extensive changes to our representation of assignments.
if (auto UnhandledStoreIt = UnknownStoreVars.find(&I);
UnhandledStoreIt != UnknownStoreVars.end()) {
LLVM_DEBUG(dbgs() << "Processing untagged unknown store " << I << "\n");
@@ -2190,14 +2190,14 @@ AllocaInst *getUnknownStore(const Instruction &I, const DataLayout &Layout) {
Intrinsic::ID ID = II->getIntrinsicID();
if (ID != Intrinsic::experimental_vp_strided_store &&
ID != Intrinsic::masked_store && ID != Intrinsic::vp_scatter &&
- ID != Intrinsic::masked_scatter)
+ ID != Intrinsic::masked_scatter && ID != Intrinsic::vp_store)
return nullptr;
Value *MemOp = II->getArgOperand(1);
- // We don't actually use the constant offsets for now, but we may in future,
+ // We don't actually use the constant offset for now, but we may in future,
// and the non-accumulating versions do not support a vector of pointers.
APInt Offset(Layout.getIndexTypeSizeInBits(MemOp->getType()), 0);
Value *Base = MemOp->stripAndAccumulateConstantOffsets(Layout, Offset, true);
- // For Base pointers that are not a single alloca value we don't need to do
+ // For Base pointers that are not an alloca instruction we don't need to do
// anything, and simply return nullptr.
return dyn_cast<AllocaInst>(Base);
}
diff --git a/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
index 0be9fcd830fd9..f8876a922431e 100644
--- a/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
+++ b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
@@ -42,7 +42,7 @@ entry:
ret void
}
-attributes #0 = { "target-features"="+64bit,+a,+c,+d,+f,+m,+relax,+v,+zaamo,+zalrsc,+zicsr,+zifencei,+zmmul,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-b,-e,-experimental-sdext,-experimental-sdtrig,-experimental-smctr,-experimental-ssctr,-experimental-svukte,-experimental-xqcia,-experimental-xqciac,-experimental-xqcicli,-experimental-xqcicm,-experimental-xqcics,-experimental-xqcicsr,-experimental-xqciint,-experimental-xqcilo,-experimental-xqcilsm,-experimental-xqcisls,-experimental-zalasr,-experimental-zicfilp,-experimental-zicfiss,-experimental-zvbc32e,-experimental-zvkgs,-h,-sha,-shcounterenw,-shgatpa,-shtvala,-shvsatpa,-shvstvala,-shvstvecd,-smaia,-smcdeleg,-smcsrind,-smdbltrp,-smepmp,-smmpm,-smnpm,-smrnmi,-smstateen,-ssaia,-ssccfg,-ssccptr,-sscofpmf,-sscounterenw,-sscsrind,-ssdbltrp,-ssnpm,-sspm,-ssqosid,-ssstateen,-ssstrict,-sstc,-sstvala,-sstvecd,-ssu64xl,-supm,-svade,-svadu,-svbare,-svinval,-svnapot,-svpbmt,-svvptc,-xcvalu,-xcvbi,-xcvbitmanip,-xcvelw,-xcvmac,-xcvmem,-xcvsimd,-xmipscmove,-xmipslsp,-xsfcease,-xsfvcp,-xsfvfnrclipxfqf,-xsfvfwmaccqqq,-xsfvqmaccdod,-xsfvqmaccqoq,-xsifivecdiscarddlone,-xsifivecflushdlone,-xtheadba,-xtheadbb,-xtheadbs,-xtheadcmo,-xtheadcondmov,-xtheadfmemidx,-xtheadmac,-xtheadmemidx,-xtheadmempair,-xtheadsync,-xtheadvdot,-xventanacondops,-xwchc,-za128rs,-za64rs,-zabha,-zacas,-zama16b,-zawrs,-zba,-zbb,-zbc,-zbkb,-zbkc,-zbkx,-zbs,-zca,-zcb,-zcd,-zce,-zcf,-zcmop,-zcmp,-zcmt,-zdinx,-zfa,-zfbfmin,-zfh,-zfhmin,-zfinx,-zhinx,-zhinxmin,-zic64b,-zicbom,-zicbop,-zicboz,-ziccamoa,-ziccif,-zicclsm,-ziccrse,-zicntr,-zicond,-zihintntl,-zihintpause,-zihpm,-zimop,-zk,-zkn,-zknd,-zkne,-zknh,-zkr,-zks,-zksed,-zksh,-zkt,-ztso,-zvbb,-zvbc,-zvfbfmin,-zvfbfwma,-zvfh,-zvfhmin,-zvkb,-zvkg,-zvkn,-zvknc,-zvkned,-zvkng,-zvknha,-zvknhb,-zvks,-zvksc,-zvksed,-zvksg,-zvksh,-zvkt,-zvl1024b,-zvl16384b,-zvl2048b,-zvl256b,-zvl32768b,-zvl4096b,-zvl512b,-zvl65536b,-zvl8192b" }
+attributes #0 = { "target-features"="+v" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Verifier/diassignid-vector-stores.ll b/llvm/test/Verifier/diassignid-vector-stores.ll
index 7fe1ee85e6c9d..3ccb15c8a4e76 100644
--- a/llvm/test/Verifier/diassignid-vector-stores.ll
+++ b/llvm/test/Verifier/diassignid-vector-stores.ll
@@ -6,12 +6,12 @@
; CHECK-NEXT: @llvm.vp.load.v2i8.p0
define void @f() !dbg !5 {
- call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr undef, <2 x i1> undef, i32 undef), !DIAssignID !6
- call void @llvm.vp.scatter.v2i8.v2p0(<2 x i8> undef, <2 x ptr> undef, <2 x i1> undef, i32 undef), !DIAssignID !7
- call void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8> undef, ptr undef, i64 undef, <2 x i1> undef, i32 undef), !DIAssignID !8
- call void @llvm.masked.store.v2i8.p0(<2 x i8> undef, ptr undef, i32 1, <2 x i1> undef), !DIAssignID !9
- call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> undef, <2 x ptr> undef, i32 1, <2 x i1> undef), !DIAssignID !10
- %r = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr undef, <2 x i1> undef, i32 undef), !DIAssignID !11
+ call void @llvm.vp.store.v2i8.p0(<2 x i8> poison, ptr poison, <2 x i1> poison, i32 poison), !DIAssignID !6
+ call void @llvm.vp.scatter.v2i8.v2p0(<2 x i8> poison, <2 x ptr> poison, <2 x i1> poison, i32 poison), !DIAssignID !7
+ call void @llvm.experimental.vp.strided.store.v2i8.i64(<2 x i8> poison, ptr poison, i64 poison, <2 x i1> poison, i32 poison), !DIAssignID !8
+ call void @llvm.masked.store.v2i8.p0(<2 x i8> poison, ptr poison, i32 1, <2 x i1> poison), !DIAssignID !9
+ call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> poison, <2 x ptr> poison, i32 1, <2 x i1> poison), !DIAssignID !10
+ %r = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr poison, <2 x i1> poison, i32 poison), !DIAssignID !11
ret void
}
From b1a158b4bbf5074f718f6c430beb756c2fd10005 Mon Sep 17 00:00:00 2001
From: Stephen Tozer <stephen.tozer at sony.com>
Date: Fri, 7 Mar 2025 12:40:06 +0000
Subject: [PATCH 6/6] Include llvm.masked.compressstore, relax verifier
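llvm.masked.compressstore packs the enabled lanes together and stores them contiguously, so the number of bytes written depends on how many mask bits are set and is not known statically. A hypothetical fragment (not from the patch; %v, %p and %m are placeholders):

  ;; Between 0 and 16 bytes are written starting at %p, depending on %m.
  call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, ptr %p, <4 x i1> %m)

It is therefore treated as another unknown store below.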
---
llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp | 3 ++-
llvm/lib/IR/Verifier.cpp | 15 ++++-----------
.../RISCV/di-assignment-tracking-vector.ll | 2 +-
3 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 227140aa40a01..e4eef4ec5e46b 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -2190,7 +2190,8 @@ AllocaInst *getUnknownStore(const Instruction &I, const DataLayout &Layout) {
Intrinsic::ID ID = II->getIntrinsicID();
if (ID != Intrinsic::experimental_vp_strided_store &&
ID != Intrinsic::masked_store && ID != Intrinsic::vp_scatter &&
- ID != Intrinsic::masked_scatter && ID != Intrinsic::vp_store)
+ ID != Intrinsic::masked_scatter && ID != Intrinsic::vp_store &&
+ ID != Intrinsic::masked_compressstore)
return nullptr;
Value *MemOp = II->getArgOperand(1);
// We don't actually use the constant offset for now, but we may in future,
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index fa76ee90b83dd..a7d880bd7a45b 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4958,17 +4958,10 @@ void Verifier::visitDIAssignIDMetadata(Instruction &I, MDNode *MD) {
assert(I.hasMetadata(LLVMContext::MD_DIAssignID));
// DIAssignID metadata must be attached to either an alloca or some form of
// store/memory-writing instruction.
- // FIXME: Is there any simpler way to express this property than manually
- // enumerating all instructions that could perform an assignment?
- bool ExpectedInstTy = isa<AllocaInst>(I) || isa<StoreInst>(I);
- if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
- const static Intrinsic::ID StoreIntrinsics[] = {
- Intrinsic::vp_store, Intrinsic::vp_scatter,
- Intrinsic::experimental_vp_strided_store, Intrinsic::masked_store,
- Intrinsic::masked_scatter};
- ExpectedInstTy |= is_contained(StoreIntrinsics, II->getIntrinsicID()) ||
- isa<MemIntrinsic>(II);
- }
+ // FIXME: We allow all intrinsic insts here to avoid trying to enumerate all
+ // possible store intrinsics.
+ bool ExpectedInstTy =
+ isa<AllocaInst>(I) || isa<StoreInst>(I) || isa<IntrinsicInst>(I);
CheckDI(ExpectedInstTy, "!DIAssignID attached to unexpected instruction kind",
I, MD);
// Iterate over the MetadataAsValue uses of the DIAssignID - these should
diff --git a/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
index f8876a922431e..9c26df0d3d278 100644
--- a/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
+++ b/llvm/test/CodeGen/RISCV/di-assignment-tracking-vector.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=riscv64 < %s -o - | FileCheck %s --implicit-check-not=DEBUG_VALUE
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s --implicit-check-not=DEBUG_VALUE
;; Verify that tagged and untagged non-contiguous stores are handled correctly
;; by assignment tracking.