[llvm-commits] [poolalloc] r57776 - in /poolalloc/trunk/include/poolalloc_runtime: PoolAllocator.h Support/SplayTree.h test.ex
Andrew Lenharth
alenhar2 at cs.uiuc.edu
Sat Oct 18 15:01:30 PDT 2008
Author: alenhar2
Date: Sat Oct 18 17:01:29 2008
New Revision: 57776
URL: http://llvm.org/viewvc/llvm-project?rev=57776&view=rev
Log:
Bitmask allocator that uses separate allocators for metadata vs. data
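
For context: keeping the free/used bitmask in memory obtained from a
separate allocator means an overflow in object data cannot corrupt the
allocator's bookkeeping. A minimal sketch of the idea follows; the names
are hypothetical and this is not the committed interface:

  // Sketch: the object slab and its metadata live in different storage.
  #include <cassert>
  #include <cstdlib>
  #include <vector>

  class TinyBitmaskPool {
    static const unsigned BitsPerWord = sizeof(unsigned) * 8;
    unsigned objsize, nobjs;
    char* slab;                    // object memory
    std::vector<unsigned> bitmask; // metadata, allocated elsewhere
  public:
    TinyBitmaskPool(unsigned Osize, unsigned N)
      : objsize(Osize), nobjs(N),
        slab((char*)malloc(Osize * N)),
        bitmask((N + BitsPerWord - 1) / BitsPerWord, 0) {}
    ~TinyBitmaskPool() { free(slab); }

    void* alloc() {
      // Linear scan for a clear bit; mark it used and hand out the slot.
      for (unsigned i = 0; i < nobjs; ++i)
        if (!(bitmask[i / BitsPerWord] & (1U << (i % BitsPerWord)))) {
          bitmask[i / BitsPerWord] |= 1U << (i % BitsPerWord);
          return slab + i * objsize;
        }
      return 0; // slab full
    }
    void dealloc(void* p) {
      unsigned i = (unsigned)(((char*)p - slab) / objsize);
      assert(i < nobjs && "pointer not from this pool");
      bitmask[i / BitsPerWord] &= ~(1U << (i % BitsPerWord));
    }
  };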
Modified:
poolalloc/trunk/include/poolalloc_runtime/PoolAllocator.h
poolalloc/trunk/include/poolalloc_runtime/Support/SplayTree.h
poolalloc/trunk/include/poolalloc_runtime/test.ex
Modified: poolalloc/trunk/include/poolalloc_runtime/PoolAllocator.h
URL: http://llvm.org/viewvc/llvm-project/poolalloc/trunk/include/poolalloc_runtime/PoolAllocator.h?rev=57776&r1=57775&r2=57776&view=diff
==============================================================================
--- poolalloc/trunk/include/poolalloc_runtime/PoolAllocator.h (original)
+++ poolalloc/trunk/include/poolalloc_runtime/PoolAllocator.h Sat Oct 18 17:01:29 2008
@@ -1,6 +1,9 @@
#include "poolalloc_runtime/Support/SplayTree.h"
#include "llvm/ADT/hash_map.h"
+#include <algorithm>
+#include <cassert>
#include <cstdlib>
+#include <sys/mman.h>
template<class SlabManager >
class PoolAllocator : SlabManager {
@@ -62,9 +65,20 @@
DAlloc allocator;
+ struct dealloc_actor {
+ MallocSlabManager* m;
+ void operator()(void*& start, void*& end) {
+ m->allocator.deallocate((char*) start,((char*)end-(char*)start) + 1);
+ }
+ dealloc_actor(MallocSlabManager* _m) : m(_m) {}
+ };
+
protected:
MallocSlabManager(unsigned Osize, unsigned Alignment) : objsize(Osize) {}
-
+ ~MallocSlabManager() {
+ dealloc_actor act(this);
+ objs.clear(act);
+ }
void* slab_alloc(unsigned num) {
void* x = allocator.allocate(num*objsize);
objs.insert(x, (char*)x + num*objsize - 1);
@@ -81,9 +95,6 @@
bool slab_valid(void* obj) {
return objs.find(obj);
}
- bool slab_contains(void* obj) {
- return objs.find(obj);
- }
bool slab_managed(void* obj) {
return objs.find(obj);
}
@@ -92,10 +103,165 @@
}
};
-template<class PageManager, unsigned PageShiftAmount>
+template<class PageManager, unsigned PageShiftAmount = 6, unsigned load = 80,
+ class SafeAllocator = std::allocator<void> >
class BitMaskSlabManager {
- hash_map<void*, std::pair<unsigned, unsigned*> >slabmetadata;
-
+ struct slab_metadata {
+ void* data;
+ unsigned* free_bitmask;
+ };
+
+ typedef typename SafeAllocator::template rebind<std::pair<void*, slab_metadata> >::other SAlloc;
+ typedef typename SafeAllocator::template rebind<unsigned>::other SBMAlloc;
+ typedef hash_map<void*, slab_metadata , hash<void*>, std::equal_to<void*>, SAlloc> MapTy;
+
+ MapTy slabmetadata;
+  typedef hash_map<void*, void*, hash<void*>, std::equal_to<void*>, SAlloc> PageMapTy;
+  PageMapTy mappedPages;
+
+ SBMAlloc BMAlloc;
+
unsigned objsize;
+ slab_metadata* CurAllocSlab;
+ unsigned totalslots;
+ unsigned totalallocs;
+
+ unsigned numObjsPerSlab() const {
+ return (PageManager::pageSize << PageShiftAmount) / objsize;
+ }
+
+ unsigned numIntsPerSlabMeta() const {
+ return (numObjsPerSlab() + (sizeof(unsigned) * 8 - 1)) / (sizeof(unsigned) * 8);
+ }
+
+ slab_metadata* getSlabForObj(void* obj) {
+ intptr_t ptr = (intptr_t)obj;
+ ptr &= ~(PageManager::pageSize - 1);
+ void* page = (void*)ptr;
+    // Use find() so probing an unmanaged pointer does not insert a
+    // dummy entry into the page map.
+    typename PageMapTy::iterator pi = mappedPages.find(page);
+    if (pi == mappedPages.end()) return 0;
+    typename MapTy::iterator ii = slabmetadata.find(pi->second);
+ if (ii == slabmetadata.end()) return 0;
+ return &ii->second;
+ }
+
+ unsigned getObjLoc(slab_metadata* slab, void* obj) {
+ return ((char*)obj - (char*)(slab->data)) / objsize;
+ }
+
+ unsigned findFree(slab_metadata* slab) const {
+ // FIXME: free_bitmask is treated as a linear array. A bit-tree representation would be faster
+
+    for (unsigned y = 0; y < numIntsPerSlabMeta(); ++y) {
+      unsigned zone = slab->free_bitmask[y];
+      if (zone != ~0U)
+        for (unsigned x = 0; x < sizeof(unsigned); ++x)
+          if ((zone >> (x * 8) & 0x00FF) != 0x00FF)
+            for (int z = 0; z < 8; ++z)
+              if (!(zone & 1 << (x * 8 + z)))
+                // Slot indices are in bits, so scale the word index by
+                // bits per word, not bytes per word.
+                return y * sizeof(unsigned) * 8 + x * 8 + z;
+    }
+    return ~0U;
+ }
+
+  bool isFree(slab_metadata* slab, unsigned loc) const {
+    const unsigned bits = sizeof(unsigned) * 8;
+    return !(slab->free_bitmask[loc / bits] & (1U << (loc % bits)));
+  }
+
+  void setFree(slab_metadata* slab, unsigned loc) {
+    const unsigned bits = sizeof(unsigned) * 8;
+    slab->free_bitmask[loc / bits] &= ~(1U << (loc % bits));
+    --totalallocs;
+  }
+
+  void setUsed(slab_metadata* slab, unsigned loc) {
+    const unsigned bits = sizeof(unsigned) * 8;
+    slab->free_bitmask[loc / bits] |= (1U << (loc % bits));
+    ++totalallocs;
+  }
+
+ void createOrSetNewSlab() {
+ if (!totalslots || ((totalallocs * 100) / totalslots > load)) {
+ // Create new slab
+ void* mem = PageManager::getPages(1 << PageShiftAmount);
+ slab_metadata& slab = slabmetadata[mem];
+ slab.data = mem;
+ slab.free_bitmask = BMAlloc.allocate(numIntsPerSlabMeta());
+ std::fill(slab.free_bitmask, slab.free_bitmask + numIntsPerSlabMeta(), 0);
+ CurAllocSlab = &slab;
+ totalslots += numObjsPerSlab();
+ for (unsigned x = 0; x < (1 << PageShiftAmount); ++x)
+ mappedPages[(void*) &(((char*)mem)[x * PageManager::pageSize])] = mem;
+ } else {
+ // Find a slab with some free space
+ for (typename MapTy::iterator ii = slabmetadata.begin(),
+ ee = slabmetadata.end(); ii != ee; ++ii)
+        if (findFree(&ii->second) != ~0U) {
+ CurAllocSlab = &ii->second;
+ break;
+ }
+ }
+ }
+
+ protected:
+ BitMaskSlabManager(unsigned Osize, unsigned Alignment)
+ :objsize(Osize), CurAllocSlab(0), totalslots(0), totalallocs(0)
+ {}
+ ~BitMaskSlabManager() {
+ for (typename MapTy::const_iterator ii = slabmetadata.begin(),
+ ee = slabmetadata.end(); ii != ee; ++ii) {
+ PageManager::freePages(ii->second.data, 1 << PageShiftAmount);
+ BMAlloc.deallocate(ii->second.free_bitmask, numIntsPerSlabMeta());
+ }
+ }
+
+ void* slab_alloc(unsigned num) {
+ if (num > 1) {
+ assert(0 && "Only size 1 allowed");
+ abort();
+ }
+ if (!CurAllocSlab)
+ createOrSetNewSlab();
+ unsigned loc = findFree(CurAllocSlab);
+    if (loc == ~0U) {
+ CurAllocSlab = 0;
+ return slab_alloc(num);
+ }
+ setUsed(CurAllocSlab, loc);
+ return &((char*)CurAllocSlab->data)[loc * objsize];
+ }
+ void slab_free(void* obj) {
+ slab_metadata* slab = getSlabForObj(obj);
+ if (!slab) {
+ assert(0 && "Freeing invalid object");
+ abort();
+ }
+ setFree(slab, getObjLoc(slab, obj));
+ }
+ bool slab_valid(void* obj) {
+ slab_metadata* slab = getSlabForObj(obj);
+ if (!slab) return false;
+ return !isFree(slab, getObjLoc(slab, obj));
+ }
+ bool slab_managed(void* obj) {
+ slab_metadata* slab = getSlabForObj(obj);
+ return slab != 0;
+ }
+ bool slab_getbounds(void* obj, void*& start, void*& end) {
+ slab_metadata* slab = getSlabForObj(obj);
+ if (!slab) return false;
+ unsigned loc = getObjLoc(slab, obj);
+ if (isFree(slab, loc)) return false;
+    // end is the last valid byte of the object, matching the inclusive
+    // ranges used elsewhere in these managers.
+    start = (char*)slab->data + loc * objsize;
+    end = (char*)start + objsize - 1;
+ return true;
+ }
+};
+class LinuxMmap {
+ public:
+ enum d {pageSize = 4096};
+ static void* getPages(unsigned num) {
+ return mmap(0, pageSize * num, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ }
+ static void freePages(void* page, unsigned num) {
+ munmap(page, num * pageSize);
+ }
};
+
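
A note on the bitmask indexing in this file: object slot loc maps to word
loc / (8 * sizeof(unsigned)) and bit loc % (8 * sizeof(unsigned));
dividing by sizeof(unsigned) alone would confuse bytes with bits. A
standalone sketch of the mapping, with illustrative helper names:

  #include <cassert>

  static const unsigned BitsPerWord = sizeof(unsigned) * 8;

  unsigned wordIndex(unsigned loc) { return loc / BitsPerWord; }
  unsigned bitIndex(unsigned loc)  { return loc % BitsPerWord; }

  void setBit(unsigned* mask, unsigned loc) {
    mask[wordIndex(loc)] |= 1U << bitIndex(loc);
  }
  bool testBit(const unsigned* mask, unsigned loc) {
    return mask[wordIndex(loc)] & (1U << bitIndex(loc));
  }

  int main() {
    unsigned mask[4] = {0, 0, 0, 0}; // covers 4 * BitsPerWord slots
    setBit(mask, 33);                // word 1, bit 1 on 32-bit unsigned
    assert(testBit(mask, 33) && !testBit(mask, 32));
    return 0;
  }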
Modified: poolalloc/trunk/include/poolalloc_runtime/Support/SplayTree.h
URL: http://llvm.org/viewvc/llvm-project/poolalloc/trunk/include/poolalloc_runtime/Support/SplayTree.h?rev=57776&r1=57775&r2=57776&view=diff
==============================================================================
--- poolalloc/trunk/include/poolalloc_runtime/Support/SplayTree.h (original)
+++ poolalloc/trunk/include/poolalloc_runtime/Support/SplayTree.h Sat Oct 18 17:01:29 2008
@@ -4,6 +4,8 @@
struct range_tree_node {
range_tree_node(void* s, void* e) : left(0), right(0), start(s), end(e) {}
range_tree_node() : left(0), right(0), start(0), end(0) {}
+ template<class O>
+ void do_act(O& act) { act(start, end, data); }
range_tree_node* left;
range_tree_node* right;
void* start;
@@ -15,6 +17,8 @@
struct range_tree_node <void>{
range_tree_node(void* s, void* e) : left(0), right(0), start(s), end(e) {}
range_tree_node() : left(0), right(0), start(0), end(0) {}
+ template<class O>
+ void do_act(O& act) { act(start, end); }
range_tree_node* left;
range_tree_node* right;
void* start;
@@ -105,6 +109,16 @@
__node_alloc.deallocate(t, 1);
}
+ template<class O>
+ void __clear_internal(tree_node* t, O& act) {
+ if (!t) return;
+    // Thread the actor through the recursion so children are visited too.
+    __clear_internal(t->left, act);
+    __clear_internal(t->right, act);
+ t->do_act(act);
+ __node_alloc.destroy(t);
+ __node_alloc.deallocate(t, 1);
+ }
+
public:
explicit RangeSplayTree(const _Alloc& a) :__node_alloc(a), Tree(0) {}
@@ -162,6 +176,12 @@
Tree = 0;
}
+ template <class O>
+ void __clear(O& act) {
+ __clear_internal(Tree, act);
+ Tree = 0;
+ }
+
tree_node* __find(void* key) {
if (!Tree) return false;
Tree = splay(Tree, key);
@@ -192,7 +212,10 @@
bool count() { return Tree.__count(); }
void clear() { Tree.__clear(); }
-
+
+ template <class O>
+ void clear(O& act) { Tree.__clear(act); }
+
bool find(void* key, void*& start, void*& end) {
range_tree_node<void>* t = Tree.__find(key);
if (!t) return false;
@@ -210,37 +233,40 @@
template<typename T, class Allocator = std::allocator<T> >
class RangeSplayMap {
RangeSplayTree<range_tree_node<T>, Allocator> Tree;
-
- public:
- explicit RangeSplayMap(const Allocator& A= Allocator() )
+
+ public:
+ explicit RangeSplayMap(const Allocator& A= Allocator() )
: Tree(A) {}
-
- bool insert(void* start, void* end, T& d) {
- range_tree_node<T>* t = Tree.__insert(start,end);
- if (t == 0) return false;
- t->data = d;
- return true;
- }
-
- bool remove(void* key) {
- return Tree.__remove(key);
- }
-
- bool count() { return Tree.__count(); }
-
- void clear() { Tree.__clear(); }
-
- bool find(void* key, void*& start, void*& end, T& d) {
- range_tree_node<T>* t = Tree.__find(key);
- if (!t) return false;
- start = t->start;
- end = t->end;
- d = t->data;
- return true;
- }
- bool find(void* key) {
- range_tree_node<T>* t = Tree.__find(key);
- if (!t) return false;
- return true;
- }
+
+ bool insert(void* start, void* end, T& d) {
+ range_tree_node<T>* t = Tree.__insert(start,end);
+ if (t == 0) return false;
+ t->data = d;
+ return true;
+ }
+
+ bool remove(void* key) {
+ return Tree.__remove(key);
+ }
+
+ bool count() { return Tree.__count(); }
+
+ void clear() { Tree.__clear(); }
+
+ template <class O>
+ void clear(O& act) { Tree.__clear(act); }
+
+ bool find(void* key, void*& start, void*& end, T& d) {
+ range_tree_node<T>* t = Tree.__find(key);
+ if (!t) return false;
+ start = t->start;
+ end = t->end;
+ d = t->data;
+ return true;
+ }
+ bool find(void* key) {
+ range_tree_node<T>* t = Tree.__find(key);
+ if (!t) return false;
+ return true;
+ }
};
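
The clear(act) overload added above lets the tree's owner visit every
stored range exactly once during teardown; MallocSlabManager's
dealloc_actor uses it to return each slab to its allocator. A reduced
sketch of the pattern, with a simplified node and tree and illustrative
names:

  #include <cstdio>

  struct Node {
    Node *left, *right;
    void *start, *end;
    Node(void* s, void* e) : left(0), right(0), start(s), end(e) {}
  };

  // Invoke the actor on each node's range, then free the node. Note the
  // actor must be threaded through the recursive calls as well.
  template<class O>
  void clearWith(Node* t, O& act) {
    if (!t) return;
    clearWith(t->left, act);
    clearWith(t->right, act);
    act(t->start, t->end);
    delete t;
  }

  struct PrintActor {
    void operator()(void*& s, void*& e) {
      std::printf("range [%p, %p]\n", s, e);
    }
  };

  int main() {
    char buf[32];
    Node* root = new Node(buf, buf + 15);
    root->left = new Node(buf + 16, buf + 31);
    PrintActor act;
    clearWith(root, act); // visits both ranges, frees both nodes
    return 0;
  }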
Modified: poolalloc/trunk/include/poolalloc_runtime/test.ex
URL: http://llvm.org/viewvc/llvm-project/poolalloc/trunk/include/poolalloc_runtime/test.ex?rev=57776&r1=57775&r2=57776&view=diff
==============================================================================
--- poolalloc/trunk/include/poolalloc_runtime/test.ex (original)
+++ poolalloc/trunk/include/poolalloc_runtime/test.ex Sat Oct 18 17:01:29 2008
@@ -2,6 +2,7 @@
#include <iostream>
PoolAllocator<MallocSlabManager<> > a(10, 16);
+PoolAllocator<BitMaskSlabManager<LinuxMmap> > b(8, 8);
RangeSplayMap<unsigned> x;
@@ -9,6 +10,10 @@
void* x = a.alloc();
std::cerr << a.isAllocated(x) << " " << a.isAllocated((char*)x + 5) << " " << a.isAllocated((char*)x + 10) << "\n";
a.dealloc(x);
+
+ x = b.alloc();
+ std::cerr << b.isAllocated(x) << " " << b.isAllocated((char*)x + 5) << " " << b.isAllocated((char*)x + 10) << "\n";
+ b.dealloc(x);
return 0;
}