[llvm] [BOLT][AArch64] Skip BBs only instead of functions (PR #81989)
Elvina Yakubova via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 27 07:30:25 PST 2024
https://github.com/ElvinaYakubova updated https://github.com/llvm/llvm-project/pull/81989
From 57268e21d8723470b12c987d9a9f07437b55303a Mon Sep 17 00:00:00 2001
From: ElvinaYakubova <elvinayakubova at gmail.com>
Date: Tue, 27 Feb 2024 15:07:19 +0000
Subject: [PATCH] [BOLT][AArch64] Skip only BBs during instrumentation
---
bolt/include/bolt/Core/MCPlusBuilder.h | 12 +-
bolt/lib/Passes/Instrumentation.cpp | 107 ++++++++++++++----
.../Target/AArch64/AArch64MCPlusBuilder.cpp | 26 +++--
bolt/test/AArch64/exclusive-instrument.s | 90 +++++++++++++--
4 files changed, 196 insertions(+), 39 deletions(-)
diff --git a/bolt/include/bolt/Core/MCPlusBuilder.h b/bolt/include/bolt/Core/MCPlusBuilder.h
index 687b49a3cbda43..eeb7609ff6b5f1 100644
--- a/bolt/include/bolt/Core/MCPlusBuilder.h
+++ b/bolt/include/bolt/Core/MCPlusBuilder.h
@@ -620,7 +620,17 @@ class MCPlusBuilder {
return Info->get(Inst.getOpcode()).mayStore();
}
- virtual bool isAArch64Exclusive(const MCInst &Inst) const {
+ virtual bool isAArch64ExclusiveLoad(const MCInst &Inst) const {
+ llvm_unreachable("not implemented");
+ return false;
+ }
+
+ virtual bool isAArch64ExclusiveStore(const MCInst &Inst) const {
+ llvm_unreachable("not implemented");
+ return false;
+ }
+
+ virtual bool isAArch64ExclusiveClear(const MCInst &Inst) const {
llvm_unreachable("not implemented");
return false;
}
diff --git a/bolt/lib/Passes/Instrumentation.cpp b/bolt/lib/Passes/Instrumentation.cpp
index 760ca84b4ef1c9..34a922e182002c 100644
--- a/bolt/lib/Passes/Instrumentation.cpp
+++ b/bolt/lib/Passes/Instrumentation.cpp
@@ -17,7 +17,9 @@
#include "bolt/Utils/Utils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/RWMutex.h"
+#include <queue>
#include <stack>
+#include <unordered_set>
#define DEBUG_TYPE "bolt-instrumentation"
@@ -86,33 +88,90 @@ cl::opt<bool> InstrumentCalls("instrument-calls",
namespace llvm {
namespace bolt {
-static bool hasAArch64ExclusiveMemop(BinaryFunction &Function) {
+static bool hasAArch64ExclusiveMemop(
+ BinaryFunction &Function,
+ std::unordered_set<const BinaryBasicBlock *> &BBToSkip) {
// FIXME ARMv8-a architecture reference manual says that software must avoid
// having any explicit memory accesses between exclusive load and associated
- // store instruction. So for now skip instrumentation for functions that have
- // these instructions, since it might lead to runtime deadlock.
+ // store instruction. So for now skip instrumentation for basic blocks that
+ // have these instructions, since it might lead to runtime deadlock.
BinaryContext &BC = Function.getBinaryContext();
- for (const BinaryBasicBlock &BB : Function)
- for (const MCInst &Inst : BB)
- if (BC.MIB->isAArch64Exclusive(Inst)) {
- if (opts::Verbosity >= 1)
- BC.outs() << "BOLT-INSTRUMENTER: Function " << Function
- << " has exclusive instructions, skip instrumentation\n";
+ std::queue<std::pair<BinaryBasicBlock *, bool>> BBQueue; // {BB, isLoad}
+ std::unordered_set<BinaryBasicBlock *> Visited;
+
+ if (Function.getLayout().block_begin() == Function.getLayout().block_end())
+ return 0;
+
+ BinaryBasicBlock *BBfirst = *Function.getLayout().block_begin();
+ BBQueue.push({BBfirst, false});
+
+ while (!BBQueue.empty()) {
+ BinaryBasicBlock *BB = BBQueue.front().first;
+ bool IsLoad = BBQueue.front().second;
+ BBQueue.pop();
+ if (Visited.find(BB) != Visited.end())
+ continue;
+ Visited.insert(BB);
+
+ for (const MCInst &Inst : *BB) {
+ // Two loads one after another - skip whole function
+ if (BC.MIB->isAArch64ExclusiveLoad(Inst) && IsLoad) {
+ if (opts::Verbosity >= 2) {
+ outs() << "BOLT-INSTRUMENTER: function " << Function.getPrintName()
+ << " has two exclusive loads. Ignoring the function.\n";
+ }
return true;
}
- return false;
-}
+ if (BC.MIB->isAArch64ExclusiveLoad(Inst))
+ IsLoad = true;
+
+ if (IsLoad && BBToSkip.find(BB) == BBToSkip.end()) {
+ BBToSkip.insert(BB);
+ if (opts::Verbosity >= 2) {
+ outs() << "BOLT-INSTRUMENTER: skip BB " << BB->getName()
+ << " due to exclusive instruction in function "
+ << Function.getPrintName() << "\n";
+ }
+ }
+
+ if (!IsLoad && BC.MIB->isAArch64ExclusiveStore(Inst)) {
+ if (opts::Verbosity >= 2) {
+ outs() << "BOLT-INSTRUMENTER: function " << Function.getPrintName()
+ << " has exclusive store without corresponding load. Ignoring "
+ "the function.\n";
+ }
+ return true;
+ }
+
+ if (IsLoad && (BC.MIB->isAArch64ExclusiveStore(Inst) ||
+ BC.MIB->isAArch64ExclusiveClear(Inst)))
+ IsLoad = false;
+ }
-uint32_t Instrumentation::getFunctionNameIndex(const BinaryFunction &Function) {
- auto Iter = FuncToStringIdx.find(&Function);
- if (Iter != FuncToStringIdx.end())
- return Iter->second;
- size_t Idx = Summary->StringTable.size();
- FuncToStringIdx.emplace(std::make_pair(&Function, Idx));
- Summary->StringTable.append(getEscapedName(Function.getOneName()));
- Summary->StringTable.append(1, '\0');
- return Idx;
+ if (IsLoad && BB->succ_size() == 0) {
+ if (opts::Verbosity >= 2) {
+ outs()
+ << "BOLT-INSTRUMENTER: function " << Function.getPrintName()
+ << " has exclusive load in trailing BB. Ignoring the function.\n";
+ }
+ return true;
+ }
+
+ for (BinaryBasicBlock *BBS : BB->successors())
+ BBQueue.push({BBS, IsLoad});
+ }
+
+ if (BBToSkip.size() == Visited.size()) {
+ if (opts::Verbosity >= 2) {
+ outs() << "BOLT-INSTRUMENTER: all BBs are marked with true. Ignoring the "
+ "function "
+ << Function.getPrintName() << "\n";
+ }
+ return true;
+ }
+
+ return false;
}
bool Instrumentation::createCallDescription(FunctionDescription &FuncDesc,
@@ -307,7 +366,8 @@ void Instrumentation::instrumentFunction(BinaryFunction &Function,
if (BC.isMachO() && Function.hasName("___GLOBAL_init_65535/1"))
return;
- if (BC.isAArch64() && hasAArch64ExclusiveMemop(Function))
+ std::unordered_set<const BinaryBasicBlock *> BBToSkip;
+ if (BC.isAArch64() && hasAArch64ExclusiveMemop(Function, BBToSkip))
return;
SplitWorklistTy SplitWorklist;
@@ -389,6 +449,11 @@ void Instrumentation::instrumentFunction(BinaryFunction &Function,
for (auto BBI = Function.begin(), BBE = Function.end(); BBI != BBE; ++BBI) {
BinaryBasicBlock &BB = *BBI;
+
+ // Skip BBs with exclusive load/stores
+ if (BBToSkip.find(&BB) != BBToSkip.end())
+ continue;
+
bool HasUnconditionalBranch = false;
bool HasJumpTable = false;
bool IsInvokeBlock = InvokeBlocks.count(&BB) > 0;
diff --git a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
index d90512e2122580..c1c09c70ab01da 100644
--- a/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
+++ b/bolt/lib/Target/AArch64/AArch64MCPlusBuilder.cpp
@@ -270,32 +270,38 @@ class AArch64MCPlusBuilder : public MCPlusBuilder {
return isLDRB(Inst) || isLDRH(Inst) || isLDRW(Inst) || isLDRX(Inst);
}
- bool isAArch64Exclusive(const MCInst &Inst) const override {
+ bool isAArch64ExclusiveLoad(const MCInst &Inst) const override {
return (Inst.getOpcode() == AArch64::LDXPX ||
Inst.getOpcode() == AArch64::LDXPW ||
Inst.getOpcode() == AArch64::LDXRX ||
Inst.getOpcode() == AArch64::LDXRW ||
Inst.getOpcode() == AArch64::LDXRH ||
Inst.getOpcode() == AArch64::LDXRB ||
- Inst.getOpcode() == AArch64::STXPX ||
- Inst.getOpcode() == AArch64::STXPW ||
- Inst.getOpcode() == AArch64::STXRX ||
- Inst.getOpcode() == AArch64::STXRW ||
- Inst.getOpcode() == AArch64::STXRH ||
- Inst.getOpcode() == AArch64::STXRB ||
Inst.getOpcode() == AArch64::LDAXPX ||
Inst.getOpcode() == AArch64::LDAXPW ||
Inst.getOpcode() == AArch64::LDAXRX ||
Inst.getOpcode() == AArch64::LDAXRW ||
Inst.getOpcode() == AArch64::LDAXRH ||
- Inst.getOpcode() == AArch64::LDAXRB ||
+ Inst.getOpcode() == AArch64::LDAXRB);
+ }
+
+ bool isAArch64ExclusiveStore(const MCInst &Inst) const override {
+ return (Inst.getOpcode() == AArch64::STXPX ||
+ Inst.getOpcode() == AArch64::STXPW ||
+ Inst.getOpcode() == AArch64::STXRX ||
+ Inst.getOpcode() == AArch64::STXRW ||
+ Inst.getOpcode() == AArch64::STXRH ||
+ Inst.getOpcode() == AArch64::STXRB ||
Inst.getOpcode() == AArch64::STLXPX ||
Inst.getOpcode() == AArch64::STLXPW ||
Inst.getOpcode() == AArch64::STLXRX ||
Inst.getOpcode() == AArch64::STLXRW ||
Inst.getOpcode() == AArch64::STLXRH ||
- Inst.getOpcode() == AArch64::STLXRB ||
- Inst.getOpcode() == AArch64::CLREX);
+ Inst.getOpcode() == AArch64::STLXRB);
+ }
+
+ bool isAArch64ExclusiveClear(const MCInst &Inst) const override {
+ return (Inst.getOpcode() == AArch64::CLREX);
}
bool isLoadFromStack(const MCInst &Inst) const {
diff --git a/bolt/test/AArch64/exclusive-instrument.s b/bolt/test/AArch64/exclusive-instrument.s
index 502dd83b2f2a5b..69cc0707aba8e4 100644
--- a/bolt/test/AArch64/exclusive-instrument.s
+++ b/bolt/test/AArch64/exclusive-instrument.s
@@ -6,32 +6,108 @@
// RUN: llvm-mc -filetype=obj -triple aarch64-unknown-unknown \
// RUN: %s -o %t.o
// RUN: %clang %cflags -fPIC -pie %t.o -o %t.exe -nostdlib -Wl,-q -Wl,-fini=dummy
-// RUN: llvm-bolt %t.exe -o %t.bolt -instrument -v=1 | FileCheck %s
+// RUN: llvm-bolt %t.exe -o %t.bolt -instrument -v=2 | FileCheck %s
-// CHECK: Function foo has exclusive instructions, skip instrumentation
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function foo
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function foo
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function foo
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function case1
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function case2
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function case2
+// CHECK: BOLT-INSTRUMENTER: function case3 has exclusive store without corresponding load. Ignoring the function.
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function case4
+// CHECK: BOLT-INSTRUMENTER: function case4 has two exclusive loads. Ignoring the function.
+// CHECK: BOLT-INSTRUMENTER: skip BB {{.*}} due to exclusive instruction in function case5
+// CHECK: BOLT-INSTRUMENTER: function case5 has exclusive load in trailing BB. Ignoring the function.
.global foo
.type foo, %function
foo:
+ # exclusive load and store in two bbs
ldaxr w9, [x10]
cbnz w9, .Lret
stlxr w12, w11, [x9]
cbz w12, foo
- clrex
.Lret:
+ clrex
ret
.size foo, .-foo
.global _start
.type _start, %function
_start:
- cmp x0, #0
- b.eq .Lexit
- bl foo
-.Lexit:
+ mov x0, #0
+ mov x1, #1
+ mov x2, #2
+ mov x3, #3
+
+ bl case1
+ bl case2
+ bl case3
+ bl case4
+ bl case5
+
ret
.size _start, .-_start
+# Case 1: exclusive load and store in one basic block
+.global case1
+.type case1, %function
+case1:
+ str x0, [x2]
+ ldxr w0, [x2]
+ add w0, w0, #1
+ stxr w1, w0, [x2]
+ ret
+.size case1, .-case1
+
+# Case 2: exclusive load and store in different blocks
+.global case2
+.type case2, %function
+case2:
+ b case2_load
+
+case2_load:
+ ldxr x0, [x2]
+ b case2_store
+
+case2_store:
+ add x0, x0, #1
+ stxr w1, x0, [x2]
+ ret
+.size case2, .-case2
+
+# Case 3: store without preceding load
+.global case3
+.type case3, %function
+case3:
+ stxr w1, x3, [x2]
+ ret
+.size case3, .-case3
+
+# Case 4: two exclusive load instructions in neighboring blocks
+.global case4
+.type case4, %function
+case4:
+ b case4_load
+
+case4_load:
+ ldxr x0, [x2]
+ b case4_load_next
+
+case4_load_next:
+ ldxr x1, [x2]
+ ret
+.size case4, .-case4
+
+# Case 5: Exclusive load without successor
+.global case5
+.type case5, %function
+case5:
+ ldxr x0, [x2]
+ ret
+.size case5, .-case5
+
.global dummy
.type dummy, %function
dummy:
More information about the llvm-commits
mailing list