
simple reap allocator...


Chris M. Thomasson
Dec 10, 2008, 11:17:29 PM
Here is the crude code:
_____________________________________________________________________________
#include <cstdio>
#include <cassert>
#include <cstdlib>
#include <cstddef>
#include <climits>
#include <exception>
#include <new>


// Intrusive circular doubly-linked list; `ctor()' turns a node into an
// empty list whose sentinel points at itself.
template<typename T>
struct dlink {
    dlink* m_next;
    dlink* m_prev;


    void ctor() {
        m_next = m_prev = this;
    }


private:
    inline static void prv_insert(
        dlink* n,
        dlink* prev,
        dlink* next
    ) throw() {
        next->m_prev = n;
        n->m_next = next;
        n->m_prev = prev;
        prev->m_next = n;
    }


    inline static void prv_remove(
        dlink* prev,
        dlink* next
    ) throw() {
        next->m_prev = prev;
        prev->m_next = next;
    }


public:
    inline void push_head(dlink* n) throw() {
        prv_insert(n, this, m_next);
    }


    inline void push_tail(dlink* n) throw() {
        prv_insert(n, m_prev, this);
    }


    inline void pop() throw() {
        prv_remove(m_prev, m_next);
    }


    inline T* get_head() throw() {
        return m_next->get();
    }


    inline T* get_tail() throw() {
        return m_prev->get();
    }


    inline T* get() throw() {
        return (T*)this;
    }
};


#define BITSIZE (sizeof(unsigned) * CHAR_BIT)


template<typename T, std::size_t DEPTH = 1024 * 4, std::size_t THRESHOLD = 2>
class reap_allocator {

    struct reap : public dlink<reap> {
        // A block is either a free-list link or raw, suitably aligned
        // storage for a single `T'.
        union block {
            block* m_next;
            double m_align;
            unsigned char m_buf[sizeof(T)];
        };


        struct allocation {
            reap* m_reap;
            block* m_block;
            block* m_next;
            bool m_flist;
        };


        struct map_node {
            std::size_t m_offset;
            std::size_t m_bit;
        };


        block m_blocks[DEPTH];
        block* m_flist;
        std::size_t m_offset;
        std::size_t m_count;
        unsigned m_map[DEPTH / BITSIZE];


        static reap* create() {
            // std::calloc() zeroes the bitmap, free-list, and counters.
            reap* const r = (reap*)std::calloc(1, sizeof(*r));
            if (! r) { throw std::bad_alloc(); }
            return r;
        }

        void destroy() {
            flush();
            std::free(this);
        }

        // Map a block address to the bitmap word and bit that track its
        // liveness. Note the divide by `sizeof(block)', not `sizeof(T)':
        // the two can differ due to union padding.
        void get_map_node(void* const mem_, map_node& mn) {
            unsigned char* const mem = (unsigned char*)mem_;
            mn.m_offset = (std::size_t)((mem - (unsigned char*)m_blocks) / sizeof(block));
            mn.m_bit = 1u << (mn.m_offset & (BITSIZE - 1));
            mn.m_offset /= BITSIZE;
        }

        void set_bit(map_node& mn) {
            m_map[mn.m_offset] |= mn.m_bit;
        }

        void clear_bit(map_node& mn) {
            m_map[mn.m_offset] &= ~mn.m_bit;
        }

        bool is_bit(map_node& mn) {
            return m_map[mn.m_offset] & mn.m_bit ? true : false;
        }

        // Reserve a block without publishing any side effects; the
        // caller commits only after `T''s ctor succeeds, which is what
        // makes `allocate()' exception-safe.
        bool allocate(allocation& a) {
            a.m_reap = this;
            a.m_block = m_flist;
            if (a.m_block) {
                a.m_next = a.m_block->m_next;
                a.m_flist = true;
                return true;
            } else {
                std::size_t const offset = m_offset;
                if (offset < DEPTH) {
                    a.m_block = m_blocks + offset;
                    a.m_flist = false;
                    return true;
                }
            }
            a.m_block = NULL;
            return false;
        }

        void allocate_commit(allocation& a) {
            map_node mn;
            get_map_node(a.m_block, mn);
            set_bit(mn);
            ++m_count;
            if (a.m_flist) {
                m_flist = a.m_next;
            } else {
                ++m_offset;
            }
        }

        // Returns the number of live objects left in this reap.
        std::size_t deallocate(void* const mem) {
            block* const b = (block*)mem;
            map_node mn;
            get_map_node(b, mn);
            clear_bit(mn);
            b->m_next = m_flist;
            m_flist = b;
            --m_count;
            return m_count;
        }

        // Run the dtor of every object whose bit is still set, then
        // reset the reap to its empty state.
        void flush() {
            map_node mn;
            for (std::size_t i = 0; i < m_offset && m_count; ++i) {
                get_map_node(m_blocks + i, mn);
                if (is_bit(mn)) {
                    try { ((T*)m_blocks[i].m_buf)->~T(); } catch (...) {}
                    clear_bit(mn);
                    --m_count;
                }
            }
            assert(! m_count);
            m_offset = 0;
            m_flist = NULL;
        }

        bool is_block(void* const mem) {
            unsigned char* const b = (unsigned char*)mem;
            unsigned char* const s = (unsigned char*)m_blocks;
            unsigned char* const e = (unsigned char*)(m_blocks + DEPTH);
            return (b >= s && b < e);
        }
    };


    dlink<reap> m_reaps;
    std::size_t m_count;


    reap* prv_expand() {
        reap* const r = reap::create();
        m_reaps.push_head(r);
        ++m_count;
        return r;
    }


    void prv_shrink(reap* const r) {
        r->pop();
        r->destroy();
        --m_count;
    }


    // Linear search for the reap that owns `mem'; see the TODO in
    // `deallocate()' below.
    reap* prv_find(void* const mem) {
        reap* r = m_reaps.get_head();
        while (r != &m_reaps) {
            if (r->is_block(mem)) {
                return r;
            }
            r = r->m_next->get();
        }
        assert(false);
        std::unexpected();
        return NULL;
    }


    typedef typename reap::allocation allocation;


    void prv_allocate(allocation& a) {
        reap* r = m_reaps.get_head();
        while (r != &m_reaps) {
            if (r->allocate(a)) {
                return;
            }
            r = r->m_next->get();
        }
        prv_expand()->allocate(a);
    }


#define REAP_SYS_ALLOCATE_X(mp_params) \
    allocation a; \
    prv_allocate(a); \
    T* const obj = new (a.m_block) T mp_params; \
    a.m_reap->allocate_commit(a); \
    return obj

#define REAP_SYS_ALLOCATE(mp_params) \
    REAP_SYS_ALLOCATE_X(mp_params)


public:
    class fguard { // flush RAII guard
        reap_allocator& m_handle;
    public:
        fguard(reap_allocator& handle) : m_handle(handle) {}
        ~fguard() { m_handle.flush(); }
    };


    reap_allocator() : m_count(0) {
        m_reaps.ctor();
        prv_expand();
    }


    ~reap_allocator() {
        flush();
        prv_shrink(m_reaps.get_head());
    }


    // Destroy every live object; all reaps but one are released back
    // to the system.
    void flush() {
        reap* r = m_reaps.get_head();
        while (r != &m_reaps) {
            reap* const next = r->m_next->get();
            if (m_count > 1) {
                prv_shrink(r);
            } else {
                r->flush();
            }
            r = next;
        }
    }


    void deallocate(T* const obj) {
        try { obj->~T(); } catch (...) {}
        // TODO: make `deallocate()' an `O(1)' procedure!
        // (e.g., remove `prv_find()'); use alignment mask instead...
        reap* const r = prv_find(obj);
        if (! r->deallocate(obj) && m_count > THRESHOLD) {
            prv_shrink(r);
        }
    }


    T* allocate() {
        REAP_SYS_ALLOCATE(());
    }


    template<typename P1>
    T* allocate(P1 p1) {
        REAP_SYS_ALLOCATE((p1));
    }
};


struct foo {
    foo* m_next;

    foo(foo* next = NULL) : m_next(next) {
        std::printf("(%p)->foo::foo(%p)\n", (void*)this, (void*)next);
    }

    ~foo() {
        std::printf("(%p)->foo::~foo() - %p\n", (void*)this, (void*)m_next);
    }
};


int main() {

    {
        reap_allocator<foo> a;
        foo* head = NULL;

        for (unsigned r = 0; r < 25; ++r) {
            reap_allocator<foo>::fguard fg(a);

            // Build a singly-linked list of (r + 1) * 10 nodes.
            for (unsigned i = 0; i < (r + 1) * 10; ++i) {
                head = a.allocate(head);
            }

            // Free everything past the fourth node by hand...
            foo* h = head->m_next->m_next->m_next->m_next;
            while (h) {
                foo* n = h->m_next;
                a.deallocate(h);
                h = n;
            }

            // ...and deliberately "leak" these; the fguard dtor will
            // flush them along with the remaining list nodes.
            a.allocate();
            a.allocate();
            a.allocate();
            a.allocate();
            a.allocate();
            a.allocate();
            a.deallocate(a.allocate());

            head = NULL;
        }
    }

    return 0;
}
_____________________________________________________________________________


As you can see, the reap allocator uses a very simple, fine-grained bitmap
algorithm to ensure that calls to `reap_allocator<T>::flush()' never invoke
the dtor of an object that has already been freed. The algorithm acts as both
a region and a heap allocation scheme. There is a paper on the idea of
merging regions and heaps:

http://www.cs.umass.edu/~emery/pubs/berger-oopsla2002.pdf
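

For example (using the `foo' type from the test program above), the two
schemes compose like this; `flush()' skips `f0' because `deallocate()'
already cleared its bit:


reap_allocator<foo> ra;
foo* f0 = ra.allocate();
foo* f1 = ra.allocate();
ra.deallocate(f0); // heap-style: f0's dtor runs now and its bit is cleared
ra.flush();        // region-style: only f1's dtor runs here
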

IMVHO, it's fairly useful. It's also completely exception-safe; the following
code does not leak memory:


reap_allocator<foo> ralloc;
for (;;) {
    reap_allocator<foo>::fguard fg(ralloc);
    ralloc.allocate(); ralloc.allocate();
    ralloc.allocate(); ralloc.allocate();
    ralloc.allocate(); ralloc.allocate();
}


`reap_allocator<T>::allocate()' can potentially throw because it invokes the
ctor of object `T'. Luckily, this does not adversely affect the state of the
allocator `ralloc' or the application, as no memory leaks occur: the
allocation is only committed after the ctor succeeds, so a throwing ctor
leaves the free-list and offset untouched. The RAII object `fg' will
automatically flush the local reap allocator `ralloc' before the next
iteration of the naked `for (;;)' loop.
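

For instance, a hypothetical `bomb' type (not part of the code above) whose
ctor always throws exercises exactly this path:


struct bomb {
    bomb() { throw 42; }
};

reap_allocator<bomb> balloc;
try {
    balloc.allocate(); // ctor throws; the reserved block is never committed
} catch (int) {
    // no leak: there is nothing to deallocate and nothing for
    // `flush()' to destroy
}
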
