[compiler-rt] e1657e3 - [asan] Add unaligned double ended container support
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 29 10:57:57 PST 2022
Author: Vitaly Buka
Date: 2022-11-29T10:56:17-08:00
New Revision: e1657e322902d973aea606d3557f938ce0e0f06c
URL: https://github.com/llvm/llvm-project/commit/e1657e322902d973aea606d3557f938ce0e0f06c
DIFF: https://github.com/llvm/llvm-project/commit/e1657e322902d973aea606d3557f938ce0e0f06c.diff
LOG: [asan] Add unaligned double ended container support
Differential Revision: https://reviews.llvm.org/D138771
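
For readers unfamiliar with the interface this patch extends, the sketch below is not part of the commit; it illustrates how a caller might annotate a double-ended container whose storage begins and ends off a shadow-granule boundary, the case this change starts supporting. It assumes the default shadow granularity of 8, a build with -fsanitize=address, and the declarations in <sanitizer/common_interface_defs.h> and <sanitizer/asan_interface.h>.

    #include <sanitizer/asan_interface.h>
    #include <sanitizer/common_interface_defs.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      // 80 bytes from malloc; the 64-byte storage is deliberately offset by
      // one byte so that neither storage_beg nor storage_end is aligned to a
      // shadow granule.
      char *raw = static_cast<char *>(malloc(80));
      char *storage_beg = raw + 1;
      char *storage_end = storage_beg + 64;

      // Before the first annotation the whole storage is addressable, so the
      // "old" container covers all of it; shrink it to 16 bytes in the middle.
      char *new_beg = storage_beg + 24;
      char *new_end = new_beg + 16;
      __sanitizer_annotate_double_ended_contiguous_container(
          storage_beg, storage_end,
          /*old_container_beg=*/storage_beg, /*old_container_end=*/storage_end,
          new_beg, new_end);

      // Bytes inside the container remain addressable ...
      assert(!__asan_address_is_poisoned(new_beg));
      // ... storage outside the container is poisoned ...
      assert(__asan_address_is_poisoned(storage_beg + 8));
      // ... and the last partial granule, shared with unpoisoned bytes after
      // the storage, is left addressable, as FixUnalignedStorage arranges.
      assert(!__asan_address_is_poisoned(storage_end - 1));

      // Grow the container back to the full storage before releasing it.
      __sanitizer_annotate_double_ended_contiguous_container(
          storage_beg, storage_end, new_beg, new_end, storage_beg, storage_end);
      free(raw);
    }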
Added:
Modified:
compiler-rt/lib/asan/asan_poisoning.cpp
compiler-rt/test/asan/TestCases/contiguous_container.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/asan/asan_poisoning.cpp b/compiler-rt/lib/asan/asan_poisoning.cpp
index 680c04f471d9..1b5701c22148 100644
--- a/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -371,7 +371,8 @@ void __asan_unpoison_stack_memory(uptr addr, uptr size) {
}
static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
- uptr &old_end, uptr &new_end) {
+ uptr &old_beg, uptr &old_end, uptr &new_beg,
+ uptr &new_end) {
constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
uptr end_down = RoundDownTo(storage_end, granularity);
@@ -379,8 +380,12 @@ static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
// unpoisoned byte, because we can't poison the prefix anyway. Don't call
// AddressIsPoisoned at all if container changes does not affect the last
// granule at all.
- if (Max(old_end, new_end) > end_down && !AddressIsPoisoned(storage_end)) {
+ if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
+ ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
+ !AddressIsPoisoned(storage_end)) {
+ old_beg = Min(end_down, old_beg);
old_end = Min(end_down, old_end);
+ new_beg = Min(end_down, new_beg);
new_end = Min(end_down, new_end);
}
}
@@ -390,12 +395,14 @@ static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
uptr beg_up = RoundUpTo(storage_beg, granularity);
// The first unaligned granule needs special handling only if we had bytes
// there before and will have none after.
- if (storage_beg == new_end && storage_beg != old_end &&
- storage_beg < beg_up) {
+ if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
+ old_beg < beg_up) {
// Keep granule prefix outside of the storage unpoisoned.
uptr beg_down = RoundDownTo(storage_beg, granularity);
*(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
+ old_beg = Max(beg_up, old_beg);
old_end = Max(beg_up, old_end);
+ new_beg = Max(beg_up, new_beg);
new_end = Max(beg_up, new_end);
}
}
@@ -413,6 +420,8 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
uptr storage_end = reinterpret_cast<uptr>(end_p);
uptr old_end = reinterpret_cast<uptr>(old_mid_p);
uptr new_end = reinterpret_cast<uptr>(new_mid_p);
+ uptr old_beg = storage_beg;
+ uptr new_beg = storage_beg;
uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!(storage_beg <= old_end && storage_beg <= new_end &&
old_end <= storage_end && new_end <= storage_end)) {
@@ -426,7 +435,8 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
if (old_end == new_end)
return; // Nothing to do here.
- FixUnalignedStorage(storage_beg, storage_end, old_end, new_end);
+ FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+ new_end);
uptr a = RoundDownTo(Min(old_end, new_end), granularity);
uptr c = RoundUpTo(Max(old_end, new_end), granularity);
@@ -499,13 +509,8 @@ void __sanitizer_annotate_double_ended_contiguous_container(
(old_beg == new_beg && old_end == new_end))
return; // Nothing to do here.
- // Right now, the function does not support:
- // - unaligned storage beginning
- // - situations when container ends in the middle of granule
- // (storage_end is unaligned by granularity)
- // and shares that granule with a different object.
- if (!AddrIsAlignedByGranularity(storage_beg))
- return;
+ FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+ new_end);
if (old_beg == old_end) {
old_beg = old_end = new_beg;
diff --git a/compiler-rt/test/asan/TestCases/contiguous_container.cpp b/compiler-rt/test/asan/TestCases/contiguous_container.cpp
index e778c00f8dfa..6288e752fd4b 100644
--- a/compiler-rt/test/asan/TestCases/contiguous_container.cpp
+++ b/compiler-rt/test/asan/TestCases/contiguous_container.cpp
@@ -195,7 +195,10 @@ void TestDoubleEndedContainer(size_t capacity, size_t off_begin,
__sanitizer_double_ended_contiguous_container_find_bad_address(
st_beg, beg, cur, st_end);
- if (cur == end) {
+ if (cur == end ||
+ // The last unaligned granule of the storage followed by unpoisoned
+ // bytes looks the same.
+ (!poison_buffer && RoundDown(st_end) <= std::min(cur, end))) {
assert(is_valid);
assert(!bad_address);
continue;
@@ -219,9 +222,13 @@ void TestDoubleEndedContainer(size_t capacity, size_t off_begin,
st_beg, cur, end, st_end);
if (cur == beg ||
- // The first unaligned granule of non-empty container looks the
- // same.
- (std::max(beg, cur) < end && RoundDown(beg) == RoundDown(cur))) {
+ // The last unaligned granule of the storage followed by unpoisoned
+ // bytes looks the same.
+ (!poison_buffer && RoundDown(st_end) <= std::min(cur, beg) ||
+ // The first unaligned granule of non-empty container looks the
+ // same.
+ (std::max(beg, cur) < end &&
+ RoundDown(beg) == RoundDown(cur)))) {
assert(is_valid);
assert(!bad_address);
continue;
@@ -268,7 +275,7 @@ int main(int argc, char **argv) {
for (int j = 0; j < kGranularity * 2; j++) {
for (int poison = 0; poison < 2; ++poison) {
TestContainer(i, j, poison);
- TestDoubleEndedContainer(i, 0, true);
+ TestDoubleEndedContainer(i, j, poison);
}
}
}
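
As a companion to the test changes above, the following hedged sketch (again not from the commit) shows the verification entry points the test exercises, __sanitizer_verify_double_ended_contiguous_container and __sanitizer_double_ended_contiguous_container_find_bad_address, on a fully aligned annotation; it assumes an ASan-instrumented build and the declarations in <sanitizer/common_interface_defs.h>.

    #include <sanitizer/common_interface_defs.h>
    #include <cassert>

    int main() {
      // Heap storage, as in the test; the container occupies the middle part.
      char *st_beg = new char[32];
      char *st_end = st_beg + 32;
      char *beg = st_beg + 8;
      char *end = st_beg + 24;

      // Everything is addressable before the first annotation, so the old
      // container range is the whole storage.
      __sanitizer_annotate_double_ended_contiguous_container(
          st_beg, st_end, st_beg, st_end, beg, end);

      // The verifier reports the shadow as consistent with the annotation,
      // and the find-bad-address helper returns null when nothing is
      // mis-poisoned.
      assert(__sanitizer_verify_double_ended_contiguous_container(
          st_beg, beg, end, st_end));
      assert(!__sanitizer_double_ended_contiguous_container_find_bad_address(
          st_beg, beg, end, st_end));

      // Undo the annotation so the delete[] below sees addressable memory.
      __sanitizer_annotate_double_ended_contiguous_container(
          st_beg, st_end, beg, end, st_beg, st_end);
      delete[] st_beg;
    }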
More information about the llvm-commits mailing list