[llvm-branch-commits] [HWASan] [MTE] allow lifetimes with multiple starts (PR #175608)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Mon Jan 12 10:54:28 PST 2026
llvmbot wrote:
@llvm/pr-subscribers-compiler-rt-sanitizer
Author: Florian Mayer (fmayer)
Changes:
---
Patch is 75.71 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/175608.diff
6 Files Affected:
- (modified) llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h (+10-9)
- (modified) llvm/lib/Target/AArch64/AArch64StackTagging.cpp (+4-4)
- (modified) llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp (+8-9)
- (modified) llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp (+28-17)
- (modified) llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll (+32)
- (modified) llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll (+929-8)
``````````diff
diff --git a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
index e0cdcf84012e8..3a6783b055dc2 100644
--- a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
+++ b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
@@ -19,6 +19,7 @@
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Alignment.h"
namespace llvm {
@@ -28,6 +29,14 @@ class PostDominatorTree;
class AllocaInst;
class Instruction;
namespace memtag {
+struct AllocaInfo {
+ AllocaInst *AI;
+ SmallVector<IntrinsicInst *, 2> LifetimeStart;
+ SmallVector<IntrinsicInst *, 2> LifetimeEnd;
+ SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
+ MapVector<BasicBlock *, Intrinsic::ID> LastBBLifetime;
+};
+
// For an alloca valid between lifetime markers Start and Ends, call the
// Callback for all possible exits out of the lifetime in the containing
// function, which can return from the instructions in RetVec.
@@ -36,8 +45,7 @@ namespace memtag {
// the caller should remove Ends to ensure that work done at the other
// exits does not happen outside of the lifetime.
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
- const LoopInfo &LI, const Instruction *Start,
- const SmallVectorImpl<IntrinsicInst *> &Ends,
+ const LoopInfo &LI, const AllocaInfo &AInfo,
const SmallVectorImpl<Instruction *> &RetVec,
llvm::function_ref<void(Instruction *)> Callback);
@@ -48,13 +56,6 @@ bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
Instruction *getUntagLocationIfFunctionExit(Instruction &Inst);
-struct AllocaInfo {
- AllocaInst *AI;
- SmallVector<IntrinsicInst *, 2> LifetimeStart;
- SmallVector<IntrinsicInst *, 2> LifetimeEnd;
- SmallVector<DbgVariableRecord *, 2> DbgVariableRecords;
-};
-
struct StackInfo {
MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
SmallVector<Instruction *, 8> RetVec;
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index d87bb522c99e8..e63d44ff48f1a 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -583,15 +583,15 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT, LI,
ClMaxLifetimes);
if (StandardLifetime) {
- IntrinsicInst *Start = Info.LifetimeStart[0];
uint64_t Size = *Info.AI->getAllocationSize(*DL);
Size = alignTo(Size, kTagGranuleSize);
- tagAlloca(AI, Start->getNextNode(), TagPCall, Size);
+ for (IntrinsicInst *Start : Info.LifetimeStart)
+ tagAlloca(AI, Start->getNextNode(), TagPCall, Size);
auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
if (!DT || !PDT ||
- !memtag::forAllReachableExits(*DT, *PDT, *LI, Start, Info.LifetimeEnd,
- SInfo.RetVec, TagEnd)) {
+ !memtag::forAllReachableExits(*DT, *PDT, *LI, Info, SInfo.RetVec,
+ TagEnd)) {
for (auto *End : Info.LifetimeEnd)
End->eraseFromParent();
}
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 832592e7663b2..f7f82842989ef 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1508,16 +1508,15 @@ void HWAddressSanitizer::instrumentStack(memtag::StackInfo &SInfo,
// postdominator analysis, and will leave us to keep memory tagged after
// function return. Work around this by always untagging at every return
// statement if return_twice functions are called.
- bool StandardLifetime =
- !SInfo.CallsReturnTwice &&
+ if (DetectUseAfterScope && !SInfo.CallsReturnTwice &&
memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &DT,
- &LI, ClMaxLifetimes);
- if (DetectUseAfterScope && StandardLifetime) {
- IntrinsicInst *Start = Info.LifetimeStart[0];
- IRB.SetInsertPoint(Start->getNextNode());
- tagAlloca(IRB, AI, Tag, Size);
- if (!memtag::forAllReachableExits(DT, PDT, LI, Start, Info.LifetimeEnd,
- SInfo.RetVec, TagEnd)) {
+ &LI, ClMaxLifetimes)) {
+ for (IntrinsicInst *Start : Info.LifetimeStart) {
+ IRB.SetInsertPoint(Start->getNextNode());
+ tagAlloca(IRB, AI, Tag, Size);
+ }
+ if (!memtag::forAllReachableExits(DT, PDT, LI, Info, SInfo.RetVec,
+ TagEnd)) {
for (auto *End : Info.LifetimeEnd)
End->eraseFromParent();
}
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index ccd66b986f763..69dbee25bf52d 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -46,31 +46,40 @@ bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
} // namespace
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
- const LoopInfo &LI, const Instruction *Start,
- const SmallVectorImpl<IntrinsicInst *> &Ends,
+ const LoopInfo &LI, const AllocaInfo &AInfo,
const SmallVectorImpl<Instruction *> &RetVec,
llvm::function_ref<void(Instruction *)> Callback) {
- if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
- Callback(Ends[0]);
+ if (AInfo.LifetimeEnd.size() == 1 && AInfo.LifetimeStart.size() == 1 &&
+ PDT.dominates(AInfo.LifetimeEnd[0], AInfo.LifetimeStart[0])) {
+ Callback(AInfo.LifetimeEnd[0]);
return true;
}
SmallPtrSet<BasicBlock *, 2> EndBlocks;
- for (auto *End : Ends) {
- EndBlocks.insert(End->getParent());
+ SmallVector<BasicBlock *, 2> StartBlocks;
+ for (const auto &[BB, ID] : AInfo.LastBBLifetime) {
+ if (ID == Intrinsic::lifetime_end)
+ EndBlocks.insert(BB);
+ else
+ StartBlocks.push_back(BB);
}
bool UncoveredRets = false;
- for (auto *RI : RetVec) {
- // If there is an end in the same basic block as the return, we know for
- // sure that the return is covered. Otherwise, we can check whether there
- // is a way to reach the RI from the start of the lifetime without passing
- // through an end.
- if (!EndBlocks.contains(RI->getParent()) &&
- isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
- Callback(RI);
- UncoveredRets = true;
+
+ if (!StartBlocks.empty()) {
+ for (auto *RI : RetVec) {
+ auto WL = StartBlocks;
+ // If the block with the return is an EndBlock (i.e. a block where the
+ // last relevant lifetime intrinsic is an end), we don't have to run a
+ // complicated algorithm to know that the RetInst is never reachable
+ // without going through an end.
+ if (!EndBlocks.contains(RI->getParent()) &&
+ isPotentiallyReachableFromMany(WL, RI->getParent(), &EndBlocks, &DT,
+ &LI)) {
+ Callback(RI);
+ UncoveredRets = true;
+ }
}
}
- for_each(Ends, Callback);
+ for_each(AInfo.LifetimeEnd, Callback);
// We may have inserted untag outside of the lifetime interval.
// Signal the caller to remove the lifetime end call for this alloca.
return !UncoveredRets;
@@ -83,7 +92,7 @@ bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
// An alloca that has exactly one start and end in every possible execution.
// If it has multiple ends, they have to be unreachable from each other, so
// at most one of them is actually used for each execution of the function.
- return LifetimeStart.size() == 1 &&
+ return LifetimeStart.size() > 0 &&
(LifetimeEnd.size() == 1 ||
(LifetimeEnd.size() > 0 &&
!maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
@@ -153,6 +162,8 @@ void StackInfoBuilder::visit(OptimizationRemarkEmitter &ORE,
Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
else
Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
+ Info.AllocasToInstrument[AI].LastBBLifetime[II->getParent()] =
+ II->getIntrinsicID();
return;
}
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
index 0fac7ec0ac0e1..6903216bc835d 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-split-lifetime.ll
@@ -205,5 +205,37 @@ exit2:
ret void
}
+define void @multiple_start(i1 %cond) local_unnamed_addr sanitize_memtag {
+start:
+; CHECK-LABEL: start:
+; CHECK-NOT: call void @llvm.aarch64.settag
+ %a = alloca i8, i32 48, align 8
+ br i1 %cond, label %next0, label %next1
+
+next0:
+; CHECK-LABEL: next0:
+; CHECK: call void @llvm.aarch64.settag
+; CHECK: call void @llvm.aarch64.settag
+ call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+ call void @use8(ptr %a)
+ call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %a)
+ br label %exit1
+
+next1:
+; CHECK-LABEL: next1:
+; CHECK: call void @llvm.aarch64.settag
+; CHECK: call void @llvm.aarch64.settag
+ call void @llvm.lifetime.start.p0(i64 48, ptr nonnull %a)
+ call void @use8(ptr %a)
+ call void @llvm.lifetime.end.p0(i64 48, ptr nonnull %a)
+ br label %exit1
+
+exit1:
+; CHECK-LABEL: exit1:
+; CHECK-NOT: call void @llvm.aarch64.settag
+ ret void
+}
+
+
declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
index 6e8b009db02a8..3a2ed3370ffab 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/use-after-scope.ll
@@ -590,8 +590,8 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress
; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
-; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
-; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
+; X86-SCOPE-NEXT: [[TMP13:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP13]], i64 16)
; X86-SCOPE-NEXT: ret i32 0
;
; X86-NOSCOPE-LABEL: @multiple_lifetimes(
@@ -654,12 +654,12 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress
; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP29]], i8 [[TMP25]], i64 1, i1 false)
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
-; AARCH64-SCOPE-NEXT: [[TMP30:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
-; AARCH64-SCOPE-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[TMP18]] to i64
-; AARCH64-SCOPE-NEXT: [[TMP32:%.*]] = and i64 [[TMP31]], 72057594037927935
-; AARCH64-SCOPE-NEXT: [[TMP33:%.*]] = lshr i64 [[TMP32]], 4
-; AARCH64-SCOPE-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP33]]
-; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP34]], i8 [[TMP30]], i64 1, i1 false)
+; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP37:%.*]] = and i64 [[TMP36]], 72057594037927935
+; AARCH64-SCOPE-NEXT: [[TMP38:%.*]] = lshr i64 [[TMP37]], 4
+; AARCH64-SCOPE-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP38]]
+; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP39]], i8 [[TMP35]], i64 1, i1 false)
; AARCH64-SCOPE-NEXT: ret i32 0
;
; AARCH64-NOSCOPE-LABEL: @multiple_lifetimes(
@@ -818,6 +818,287 @@ define dso_local i32 @multiple_lifetimes() local_unnamed_addr sanitize_hwaddress
ret i32 0
}
+define dso_local i32 @multiple_lifetimes_unterminated() local_unnamed_addr sanitize_hwaddress {
+; X86-SCOPE-LABEL: @multiple_lifetimes_unterminated(
+; X86-SCOPE-NEXT: [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr null)
+; X86-SCOPE-NEXT: [[TMP1:%.*]] = call ptr @llvm.frameaddress.p0(i32 0)
+; X86-SCOPE-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; X86-SCOPE-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 57
+; X86-SCOPE-NEXT: [[HWASAN_UAR_TAG:%.*]] = and i64 [[TMP3]], 63
+; X86-SCOPE-NEXT: [[TMP4:%.*]] = alloca { i8, [15 x i8] }, align 16
+; X86-SCOPE-NEXT: [[TMP5:%.*]] = call i8 @__hwasan_generate_tag()
+; X86-SCOPE-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
+; X86-SCOPE-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; X86-SCOPE-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -9079256848778919937
+; X86-SCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
+; X86-SCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
+; X86-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
+; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
+; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; X86-SCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
+; X86-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP4]])
+; X86-SCOPE-NEXT: [[TMP13:%.*]] = trunc i64 [[TMP6]] to i8
+; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP13]], i64 16)
+; X86-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; X86-SCOPE-NEXT: [[TMP14:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; X86-SCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP14]], i64 16)
+; X86-SCOPE-NEXT: ret i32 0
+;
+; X86-NOSCOPE-LABEL: @multiple_lifetimes_unterminated(
+; X86-NOSCOPE-NEXT: [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr null)
+; X86-NOSCOPE-NEXT: [[TMP1:%.*]] = call ptr @llvm.frameaddress.p0(i32 0)
+; X86-NOSCOPE-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; X86-NOSCOPE-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 57
+; X86-NOSCOPE-NEXT: [[HWASAN_UAR_TAG:%.*]] = and i64 [[TMP3]], 63
+; X86-NOSCOPE-NEXT: [[TMP4:%.*]] = alloca { i8, [15 x i8] }, align 16
+; X86-NOSCOPE-NEXT: [[TMP5:%.*]] = call i8 @__hwasan_generate_tag()
+; X86-NOSCOPE-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
+; X86-NOSCOPE-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP4]] to i64
+; X86-NOSCOPE-NEXT: [[TMP8:%.*]] = and i64 [[TMP7]], -9079256848778919937
+; X86-NOSCOPE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP6]], 57
+; X86-NOSCOPE-NEXT: [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
+; X86-NOSCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP10]] to ptr
+; X86-NOSCOPE-NEXT: [[TMP11:%.*]] = trunc i64 [[TMP6]] to i8
+; X86-NOSCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP11]], i64 16)
+; X86-NOSCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; X86-NOSCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; X86-NOSCOPE-NEXT: [[TMP12:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; X86-NOSCOPE-NEXT: call void @__hwasan_tag_memory(ptr [[TMP4]], i8 [[TMP12]], i64 16)
+; X86-NOSCOPE-NEXT: ret i32 0
+;
+; AARCH64-SCOPE-LABEL: @multiple_lifetimes_unterminated(
+; AARCH64-SCOPE-NEXT: [[TMP1:%.*]] = call ptr @llvm.thread.pointer.p0()
+; AARCH64-SCOPE-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 48
+; AARCH64-SCOPE-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+; AARCH64-SCOPE-NEXT: [[TMP4:%.*]] = ashr i64 [[TMP3]], 3
+; AARCH64-SCOPE-NEXT: [[TMP5:%.*]] = call i64 @llvm.read_register.i64(metadata [[META2]])
+; AARCH64-SCOPE-NEXT: [[TMP6:%.*]] = call ptr @llvm.frameaddress.p0(i32 0)
+; AARCH64-SCOPE-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[TMP6]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 44
+; AARCH64-SCOPE-NEXT: [[TMP9:%.*]] = or i64 [[TMP5]], [[TMP8]]
+; AARCH64-SCOPE-NEXT: [[TMP10:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; AARCH64-SCOPE-NEXT: store i64 [[TMP9]], ptr [[TMP10]], align 8
+; AARCH64-SCOPE-NEXT: [[TMP11:%.*]] = ashr i64 [[TMP3]], 56
+; AARCH64-SCOPE-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP11]], 12
+; AARCH64-SCOPE-NEXT: [[TMP13:%.*]] = xor i64 [[TMP12]], -1
+; AARCH64-SCOPE-NEXT: [[TMP14:%.*]] = add i64 [[TMP3]], 8
+; AARCH64-SCOPE-NEXT: [[TMP15:%.*]] = and i64 [[TMP14]], [[TMP13]]
+; AARCH64-SCOPE-NEXT: store i64 [[TMP15]], ptr [[TMP2]], align 8
+; AARCH64-SCOPE-NEXT: [[TMP16:%.*]] = or i64 [[TMP3]], 4294967295
+; AARCH64-SCOPE-NEXT: [[HWASAN_SHADOW:%.*]] = add i64 [[TMP16]], 1
+; AARCH64-SCOPE-NEXT: [[TMP17:%.*]] = inttoptr i64 [[HWASAN_SHADOW]] to ptr
+; AARCH64-SCOPE-NEXT: [[HWASAN_UAR_TAG:%.*]] = lshr i64 [[TMP7]], 56
+; AARCH64-SCOPE-NEXT: [[TMP18:%.*]] = alloca { i8, [15 x i8] }, align 16
+; AARCH64-SCOPE-NEXT: [[TMP19:%.*]] = call i8 @__hwasan_generate_tag()
+; AARCH64-SCOPE-NEXT: [[TMP20:%.*]] = zext i8 [[TMP19]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP22:%.*]] = and i64 [[TMP21]], 72057594037927935
+; AARCH64-SCOPE-NEXT: [[TMP23:%.*]] = shl i64 [[TMP20]], 56
+; AARCH64-SCOPE-NEXT: [[TMP24:%.*]] = or i64 [[TMP22]], [[TMP23]]
+; AARCH64-SCOPE-NEXT: [[ALLOCA_0_HWASAN:%.*]] = inttoptr i64 [[TMP24]] to ptr
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: [[TMP25:%.*]] = trunc i64 [[TMP20]] to i8
+; AARCH64-SCOPE-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP27:%.*]] = and i64 [[TMP26]], 72057594037927935
+; AARCH64-SCOPE-NEXT: [[TMP28:%.*]] = lshr i64 [[TMP27]], 4
+; AARCH64-SCOPE-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP28]]
+; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP29]], i8 [[TMP25]], i64 1, i1 false)
+; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-SCOPE-NEXT: [[TMP30:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; AARCH64-SCOPE-NEXT: [[TMP31:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP32:%.*]] = and i64 [[TMP31]], 72057594037927935
+; AARCH64-SCOPE-NEXT: [[TMP33:%.*]] = lshr i64 [[TMP32]], 4
+; AARCH64-SCOPE-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP33]]
+; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP34]], i8 [[TMP30]], i64 1, i1 false)
+; AARCH64-SCOPE-NEXT: call void @llvm.lifetime.start.p0(ptr nonnull [[TMP18]])
+; AARCH64-SCOPE-NEXT: [[TMP35:%.*]] = trunc i64 [[TMP20]] to i8
+; AARCH64-SCOPE-NEXT: [[TMP36:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP37:%.*]] = and i64 [[TMP36]], 72057594037927935
+; AARCH64-SCOPE-NEXT: [[TMP38:%.*]] = lshr i64 [[TMP37]], 4
+; AARCH64-SCOPE-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[TMP17]], i64 [[TMP38]]
+; AARCH64-SCOPE-NEXT: call void @llvm.memset.p0.i64(ptr align 1 [[TMP39]], i8 [[TMP35]], i64 1, i1 false)
+; AARCH64-SCOPE-NEXT: call void @use(ptr nonnull [[ALLOCA_0_HWASAN]])
+; AARCH64-SCOPE-NEXT: [[TMP40:%.*]] = trunc i64 [[HWASAN_UAR_TAG]] to i8
+; AARCH64-SCOPE-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[TMP18]] to i64
+; AARCH64-SCOPE-NEXT: [[TMP42:%.*]] = and i64 [[TMP41]], 72057594037927935
+...
[truncated]
``````````
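For context on the algorithmic change: the patch replaces the single `Start` instruction parameter of `forAllReachableExits` with the whole `AllocaInfo`, whose new `LastBBLifetime` map classifies each basic block by whichever lifetime marker of the alloca comes last in it. A return then only needs its own untag if it is reachable from a block whose last marker is a start without passing through a block whose last marker is an end. Below is a minimal standalone sketch of that rule, using toy CFG types rather than LLVM's `DominatorTree`/`isPotentiallyReachableFromMany` machinery; it is illustrative only, not the patch's actual code.

```cpp
#include <iostream>
#include <queue>
#include <set>
#include <vector>

using Block = int;

// Returns true if the return in RetBlock can be reached from some block whose
// last lifetime marker is a start, without passing through a block whose last
// marker is an end -- i.e. the return still needs its own untag.
// StartBlocks and EndBlocks are assumed disjoint (a block is classified by its
// last marker only), mirroring AllocaInfo::LastBBLifetime.
bool returnIsUncovered(const std::vector<std::vector<Block>> &Succs,
                       const std::set<Block> &StartBlocks,
                       const std::set<Block> &EndBlocks, Block RetBlock) {
  if (EndBlocks.count(RetBlock))
    return false; // an end in the same block always covers the return
  std::set<Block> Visited;
  std::queue<Block> Worklist;
  for (Block B : StartBlocks)
    Worklist.push(B);
  while (!Worklist.empty()) {
    Block B = Worklist.front();
    Worklist.pop();
    if (!Visited.insert(B).second)
      continue;
    if (B == RetBlock)
      return true; // reached the return with the lifetime still open
    if (EndBlocks.count(B))
      continue; // this path is closed by a lifetime end
    for (Block S : Succs[B])
      Worklist.push(S);
  }
  return false;
}

int main() {
  // CFG shaped like the @multiple_start test: 0 -> {1, 2} -> 3, return in 3.
  std::vector<std::vector<Block>> Succs = {{1, 2}, {3}, {3}, {}};

  // Both branches end the lifetime before the merge block: return is covered.
  std::cout << returnIsUncovered(Succs, /*StartBlocks=*/{},
                                 /*EndBlocks=*/{1, 2}, /*RetBlock=*/3)
            << "\n"; // 0

  // Only one branch ends the lifetime: the return is reachable with the
  // lifetime still open, so an untag has to be emitted at the return.
  std::cout << returnIsUncovered(Succs, /*StartBlocks=*/{2},
                                 /*EndBlocks=*/{1}, /*RetBlock=*/3)
            << "\n"; // 1
}
```

This is also why the new `@multiple_start` test expects no `settag` in its exit block: both predecessors end the lifetime before branching there, so the return is already covered.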
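The bookkeeping behind that classification is the single added assignment in `StackInfoBuilder::visit`: later markers simply overwrite earlier ones in the map, so a block's entry reflects the alloca's lifetime state at the block's exit. A tiny illustration of this "last marker wins" behaviour, with toy types standing in for LLVM's `MapVector`:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

enum class Marker { Start, End };

int main() {
  // Lifetime markers for one alloca, in visitation order. bb1 contains
  // start, end, start -> it exits with the lifetime open, so it must be
  // classified as a "start" block; bb2 exits with the lifetime closed.
  std::vector<std::pair<std::string, Marker>> markersInOrder = {
      {"bb1", Marker::Start}, {"bb1", Marker::End}, {"bb1", Marker::Start},
      {"bb2", Marker::Start}, {"bb2", Marker::End}};

  std::map<std::string, Marker> lastMarkerPerBlock; // ~ AllocaInfo::LastBBLifetime
  for (const auto &[bb, m] : markersInOrder)
    lastMarkerPerBlock[bb] = m; // later markers overwrite earlier ones

  for (const auto &[bb, m] : lastMarkerPerBlock)
    std::cout << bb << ": " << (m == Marker::Start ? "start" : "end") << "\n";
  // Prints: bb1: start, bb2: end
}
```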
https://github.com/llvm/llvm-project/pull/175608
More information about the llvm-branch-commits mailing list