[llvm-commits] [compiler-rt] r169593 - /compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h

Alexey Samsonov samsonov at google.com
Fri Dec 7 10:11:05 PST 2012


On Fri, Dec 7, 2012 at 10:05 AM, Kostya Serebryany <kcc at google.com> wrote:

>
>
> On Fri, Dec 7, 2012 at 9:28 PM, Alexey Samsonov <samsonov at google.com>wrote:
>
>>
>>
>> On Fri, Dec 7, 2012 at 1:40 AM, Kostya Serebryany <kcc at google.com> wrote:
>>
>>> Author: kcc
>>> Date: Fri Dec  7 03:40:17 2012
>>> New Revision: 169593
>>>
>>> URL: http://llvm.org/viewvc/llvm-project?rev=169593&view=rev
>>> Log:
>>> [sanitizer] fix the build on ancient gcc, which has stricter rules about
>>> what can be put in TLS. Long term, we absolutely must build the run-times
>>> with a fresh target clang
>>>
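For context, a minimal sketch of the failure mode, assuming a gcc old enough
to require POD types for __thread variables (the type names are illustrative,
not taken from the patch):

    // A class with a base class is non-POD in C++98 terms, and ancient gcc
    // rejects non-POD types in thread-local storage.
    struct List { void *first; };
    struct FreeList : List {};      // non-POD: has a base class
    // __thread FreeList tls_bad;   // rejected by old gcc: non-POD in TLS

    // The patch's approach, mirrored here: keep the TLS type a plain typedef
    // of the POD list type and move behavior into a free function, not a method.
    typedef List PodFreeList;
    __thread PodFreeList tls_ok;    // accepted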
>>
>> If we only use clang, how would we know that the gcc build is broken (if
>> we're still interested in it)?
>>
>
> We may have separate bots for the other compiler versions we care about.
> But right now the situation is this: someone (e.g. me) runs the build on a
> local machine and it passes, commits the code, and everything looks ok,
> but 10 other compiler configurations (versions, sets of warnings, etc.)
> get broken.
> OTOH, this probably happens with LLVM proper too, and there is not much we
> can do.  :(
>

Yeah, we'll have to rely on bots anyway (and probably put some more effort
into pushing ASan/TSan onto the LLVM buildbot infrastructure).


> --kcc
>
>
>>
>>
>>>
>>> Modified:
>>>     compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
>>> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=169593&r1=169592&r2=169593&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
>>> (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Fri Dec  7 03:40:17 2012
>>> @@ -97,25 +97,29 @@
>>>    AllocatorListNode *next;
>>>  };
>>>
>>> -struct AllocatorFreeList: IntrusiveList<AllocatorListNode> {
>>> -  // Move at most max_count chunks to other_free_list.
>>> -  void BulkAllocate(uptr max_count, AllocatorFreeList *other_free_list) {
>>> -    CHECK(!empty());
>>> -    CHECK(other_free_list->empty());
>>> -    if (size() <= max_count) {
>>> -      other_free_list->append_front(this);
>>> -      CHECK(empty());
>>> -    } else {
>>> -      for (uptr i = 0; i < max_count; i++) {
>>> -        AllocatorListNode *node = front();
>>> -        pop_front();
>>> -        other_free_list->push_front(node);
>>> -      }
>>> -      CHECK(!empty());
>>> +typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
>>> +
>>> +// Move at most max_count chunks from allocate_from to allocate_to.
>>> +// This function should really be a method of AllocatorFreeList, but we
>>> +// can't inherit it from IntrusiveList, as ancient gcc complains about
>>> +// non-PODness.
>>> +static inline void BulkMove(uptr max_count,
>>> +                            AllocatorFreeList *allocate_from,
>>> +                            AllocatorFreeList *allocate_to) {
>>> +  CHECK(!allocate_from->empty());
>>> +  CHECK(allocate_to->empty());
>>> +  if (allocate_from->size() <= max_count) {
>>> +    allocate_to->append_front(allocate_from);
>>> +    CHECK(allocate_from->empty());
>>> +  } else {
>>> +    for (uptr i = 0; i < max_count; i++) {
>>> +      AllocatorListNode *node = allocate_from->front();
>>> +      allocate_from->pop_front();
>>> +      allocate_to->push_front(node);
>>>      }
>>> -    CHECK(!other_free_list->empty());
>>> +    CHECK(!allocate_from->empty());
>>>    }
>>> -};
>>> +  CHECK(!allocate_to->empty());
>>> +}
>>>
>>>  // SizeClassAllocator64 -- allocator for 64-bit address space.
>>>  //
>>> @@ -164,8 +168,7 @@
>>>      if (region->free_list.empty()) {
>>>        PopulateFreeList(class_id, region);
>>>      }
>>> -    region->free_list.BulkAllocate(
>>> -        SizeClassMap::MaxCached(class_id), free_list);
>>> +    BulkMove(SizeClassMap::MaxCached(class_id), &region->free_list, free_list);
>>>    }
>>>
>>>    // Swallow the entire free_list for the given class_id.
>>> @@ -371,7 +374,7 @@
>>>      SpinMutexLock l(&sci->mutex);
>>>      EnsureSizeClassHasAvailableChunks(sci, class_id);
>>>      CHECK(!sci->free_list.empty());
>>> -    sci->free_list.BulkAllocate(SizeClassMap::MaxCached(class_id), free_list);
>>> +    BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
>>>    }
>>>
>>>    // Swallow the entire free_list for the given class_id.
>>> @@ -424,6 +427,7 @@
>>>
>>>    typedef SizeClassMap SizeClassMapT;
>>>    static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 128
>>> +
>>>   private:
>>>    static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
>>>    static const uptr kRegionSize = 1 << kRegionSizeLog;
>>> @@ -433,7 +437,7 @@
>>>    struct SizeClassInfo {
>>>      SpinMutex mutex;
>>>      AllocatorFreeList free_list;
>>> -    char padding[kCacheLineSize - sizeof(uptr) - sizeof (AllocatorFreeList)];
>>> +    char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
>>>    };
>>>    COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
>>>
>>>
>>>
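As an aside, the cache-line padding idiom in the last hunk can be sketched in
standalone C++ (static_assert stands in for COMPILER_CHECK; the 64-byte line
size and the stand-in member types are assumptions, not the real sanitizer
types):

    #include <cstddef>

    const std::size_t kCacheLineSize = 64;  // assumed cache-line size

    struct SpinMutex { unsigned long opaque; };                 // stand-in
    struct FreeList  { void *first, *last; std::size_t size; }; // stand-in

    // Pad each per-size-class record out to a full cache line so that the
    // mutexes of adjacent size classes never share a line (no false sharing).
    struct SizeClassInfo {
      SpinMutex mutex;
      FreeList free_list;
      char padding[kCacheLineSize - sizeof(SpinMutex) - sizeof(FreeList)];
    };

    static_assert(sizeof(SizeClassInfo) == kCacheLineSize,
                  "SizeClassInfo must occupy exactly one cache line");

On LP64 that is 8 + 24 + 32 = 64 bytes; if the member sizes change, the
padding expression shrinks to match, and the assert (or a negative array
size) catches a mismatch at compile time.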
>>
>>
>>
>> --
>> Alexey Samsonov, MSK
>>
>>
>


-- 
Alexey Samsonov, MSK