[llvm-commits] [compiler-rt] r169076 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_linux.cc sanitizer_mac.cc sanitizer_procmaps.h
Kostya Serebryany
kcc at google.com
Fri Nov 30 23:45:00 PST 2012
Besides, I really don't understand what the intended caching behavior is.
What if /proc/self/maps has changed and we are reading old data from the cache?
And if we can request the contents of /proc/self/maps from the user, why do
we need a cache?
On Sat, Dec 1, 2012 at 8:49 AM, Kostya Serebryany <kcc at google.com> wrote:
>
>
> On Sat, Dec 1, 2012 at 6:53 AM, Alexey Samsonov <samsonov at google.com> wrote:
>
>>
>>
>> On Fri, Nov 30, 2012 at 6:39 PM, Alexander Potapenko <glider at google.com> wrote:
>>
>>> Author: glider
>>> Date: Fri Nov 30 20:39:45 2012
>>> New Revision: 169076
>>>
>>> URL: http://llvm.org/viewvc/llvm-project?rev=169076&view=rev
>>> Log:
>>> Add caching to the MemoryMappingLayout class on Linux. This is necessary
>>> for the cases when a sandbox prevents ASan from reading the mappings
>>> from /proc/self/maps.
>>> The mappings are currently being cached on each access to
>>> /proc/self/maps. In the future we'll need to add an API that allows the
>>> client to notify ASan about the sandbox.
>>>
>>>
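To make the intended flow concrete, here is a rough sketch of what that future
client-side hook might look like (SandboxOnNotify is just a placeholder name I
am using here; the only pieces that actually exist in this patch are
CacheMemoryMappings() and LoadFromCache()):

  #include "sanitizer_procmaps.h"

  namespace __sanitizer {

  // Hypothetical hook the client would call right before entering the
  // sandbox, while /proc/self/maps is still readable.
  void SandboxOnNotify() {
    // Refresh the static cache now; MemoryMappingLayout objects created
    // after the sandbox is engaged will fail to read /proc/self/maps and
    // fall back to this snapshot via LoadFromCache().
    MemoryMappingLayout::CacheMemoryMappings();
  }

  }  // namespace __sanitizer
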
>>> Modified:
>>> compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc
>>> compiler-rt/trunk/lib/sanitizer_common/sanitizer_mac.cc
>>> compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc?rev=169076&r1=169075&r2=169076&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_linux.cc Fri Nov 30 20:39:45 2012
>>> @@ -16,6 +16,7 @@
>>> #include "sanitizer_common.h"
>>> #include "sanitizer_internal_defs.h"
>>> #include "sanitizer_libc.h"
>>> +#include "sanitizer_mutex.h"
>>> #include "sanitizer_placement_new.h"
>>> #include "sanitizer_procmaps.h"
>>>
>>> @@ -217,13 +218,23 @@
>>> }
>>>
>>> // ----------------- sanitizer_procmaps.h
>>> +char *MemoryMappingLayout::cached_proc_self_maps_buff_ = NULL;
>>> +uptr MemoryMappingLayout::cached_proc_self_maps_buff_mmaped_size_ = 0;
>>> +uptr MemoryMappingLayout::cached_proc_self_maps_buff_len_ = 0;
>>> +StaticSpinMutex MemoryMappingLayout::cache_lock_;
>>> +
>>> MemoryMappingLayout::MemoryMappingLayout() {
>>> proc_self_maps_buff_len_ =
>>> ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
>>> &proc_self_maps_buff_mmaped_size_, 1 << 26);
>>> - CHECK_GT(proc_self_maps_buff_len_, 0);
>>> + if (proc_self_maps_buff_mmaped_size_ == 0) {
>>> + LoadFromCache();
>>> + CHECK_GT(proc_self_maps_buff_len_, 0);
>>> + }
>>> // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
>>> Reset();
>>> + // FIXME: in the future we may want to cache the mappings on demand only.
>>> + CacheMemoryMappings();
>>> }
>>>
>>> MemoryMappingLayout::~MemoryMappingLayout() {
>>> @@ -234,6 +245,39 @@
>>> current_ = proc_self_maps_buff_;
>>> }
>>>
>>> +// static
>>> +void MemoryMappingLayout::CacheMemoryMappings() {
>>> + SpinMutexLock l(&cache_lock_);
>>> + // Don't invalidate the cache if the mappings are unavailable.
>>> + char *old_proc_self_maps_buff_ = cached_proc_self_maps_buff_;
>>> + uptr old_proc_self_maps_buff_mmaped_size_ =
>>> + cached_proc_self_maps_buff_mmaped_size_;
>>> + uptr old_proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
>>> + cached_proc_self_maps_buff_len_ =
>>> + ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_buff_,
>>> + &cached_proc_self_maps_buff_mmaped_size_, 1 << 26);
>>> + if (cached_proc_self_maps_buff_mmaped_size_ == 0) {
>>> + cached_proc_self_maps_buff_ = old_proc_self_maps_buff_;
>>> + cached_proc_self_maps_buff_mmaped_size_ =
>>> + old_proc_self_maps_buff_mmaped_size_;
>>> + cached_proc_self_maps_buff_len_ = old_proc_self_maps_buff_len_;
>>> + } else {
>>> + if (old_proc_self_maps_buff_mmaped_size_) {
>>> + UnmapOrDie(old_proc_self_maps_buff_,
>>> + old_proc_self_maps_buff_mmaped_size_);
>>>
>>
>> OMG. I really think we need to wrap buff_, mmaped_size_ and buff_len_ in a
>> struct.
>>
> +1
>
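Something like this, perhaps (just a sketch; ProcSelfMapsBuff is a made-up
name, and the fields simply mirror the existing members):

  struct ProcSelfMapsBuff {
    char *data;
    uptr mmaped_size;
    uptr len;
  };

MemoryMappingLayout would then keep one ProcSelfMapsBuff per instance plus one
static ProcSelfMapsBuff for the cache, so CacheMemoryMappings() and
LoadFromCache() could copy or swap all three fields in a single assignment
instead of juggling them separately.
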
>>
>>
>>> + }
>>> + }
>>> +}
>>> +
>>> +void MemoryMappingLayout::LoadFromCache() {
>>> + SpinMutexLock l(&cache_lock_);
>>> + if (cached_proc_self_maps_buff_) {
>>> + proc_self_maps_buff_ = cached_proc_self_maps_buff_;
>>> + proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
>>> + proc_self_maps_buff_mmaped_size_ = cached_proc_self_maps_buff_mmaped_size_;
>>> + }
>>> +}
>>> +
>>> // Parse a hex value in str and update str.
>>> static uptr ParseHex(char **str) {
>>> uptr x = 0;
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_mac.cc
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_mac.cc?rev=169076&r1=169075&r2=169076&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_mac.cc (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_mac.cc Fri Nov 30 20:39:45 2012
>>> @@ -162,6 +162,15 @@
>>> current_filetype_ = 0;
>>> }
>>>
>>> +// static
>>> +void MemoryMappingLayout::CacheMemoryMappings() {
>>> + // No-op on Mac for now.
>>> +}
>>> +
>>> +void MemoryMappingLayout::LoadFromCache() {
>>> + // No-op on Mac for now.
>>> +}
>>> +
>>> // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
>>> // Google Perftools, http://code.google.com/p/google-perftools.
>>>
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h?rev=169076&r1=169075&r2=169076&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_procmaps.h Fri Nov 30 20:39:45 2012
>>> @@ -15,6 +15,7 @@
>>> #define SANITIZER_PROCMAPS_H
>>>
>>> #include "sanitizer_internal_defs.h"
>>> +#include "sanitizer_mutex.h"
>>>
>>> namespace __sanitizer {
>>>
>>> @@ -39,9 +40,14 @@
>>> // address 'addr'. Returns true on success.
>>> bool GetObjectNameAndOffset(uptr addr, uptr *offset,
>>> char filename[], uptr filename_size);
>>> + // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
>>> + // to obtain the memory mappings. It should fall back to pre-cached data
>>> + // instead of aborting.
>>> + static void CacheMemoryMappings();
>>> ~MemoryMappingLayout();
>>>
>>> private:
>>> + void LoadFromCache();
>>> // Default implementation of GetObjectNameAndOffset.
>>> // Quite slow, because it iterates through the whole process map for each
>>> // lookup.
>>> @@ -77,6 +83,12 @@
>>> uptr proc_self_maps_buff_mmaped_size_;
>>> uptr proc_self_maps_buff_len_;
>>> char *current_;
>>> +
>>> + // Static mappings cache.
>>> + static char *cached_proc_self_maps_buff_;
>>> + static uptr cached_proc_self_maps_buff_mmaped_size_;
>>> + static uptr cached_proc_self_maps_buff_len_;
>>> + static StaticSpinMutex cache_lock_; // protects the cache contents.
>>> # elif defined __APPLE__
>>> template<u32 kLCSegment, typename SegmentCommand>
>>> bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
>>>
>>>
>>>
>>
>>
>>
>> --
>> Alexey Samsonov, MSK
>>
>>
>>
>>
>