[inferno-npe] 2 new revisions pushed by extrudedaluminiu on 2010-02-07 13:25 GMT


infer...@googlecode.com

Feb 7, 2010, 8:25:59 AM
to inferno-n...@googlegroups.com
2 new revisions:

Revision: c3dabd3cf0
Author: Venkatesh Srinivas <m...@acm.jhu.edu>
Date: Sat Feb 6 08:24:58 2010
Log: Import libavl from Plan 9 Port into inferno-npe tree....
http://code.google.com/p/inferno-npe/source/detail?r=c3dabd3cf0

Revision: 81e8f54f5e
Author: Venkatesh Srinivas <m...@acm.jhu.edu>
Date: Sun Feb 7 05:24:59 2010
Log: Snapshot of object cache; it is not yet finished, ready, or enabled....
http://code.google.com/p/inferno-npe/source/detail?r=81e8f54f5e

==============================================================================
Revision: c3dabd3cf0
Author: Venkatesh Srinivas <m...@acm.jhu.edu>
Date: Sat Feb 6 08:24:58 2010
Log: Import libavl from Plan 9 Port into inferno-npe tree.

Libavl will be used in the debug code for an object caching allocator,
currently in development.
http://code.google.com/p/inferno-npe/source/detail?r=c3dabd3cf0

Added:
/include/avl.h
/libavl/avl.c
/libavl/mkfile
Modified:
/emu/Linux/emu

=======================================
--- /dev/null
+++ /include/avl.h Sat Feb 6 08:24:58 2010
@@ -0,0 +1,22 @@
+typedef struct Avl Avl;
+typedef struct Avltree Avltree;
+typedef struct Avlwalk Avlwalk;
+
+#pragma incomplete Avltree
+#pragma incomplete Avlwalk
+
+struct Avl
+{
+ Avl *p; /* parent */
+ Avl *n[2]; /* children */
+ int bal; /* balance bits */
+};
+
+Avl *avlnext(Avlwalk *walk);
+Avl *avlprev(Avlwalk *walk);
+Avlwalk *avlwalk(Avltree *tree);
+void deleteavl(Avltree *tree, Avl *key, Avl **oldp);
+void endwalk(Avlwalk *walk);
+void insertavl(Avltree *tree, Avl *new, Avl **oldp);
+Avl *lookupavl(Avltree *tree, Avl *key);
+Avltree *mkavltree(int(*cmp)(Avl*, Avl*));
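
A minimal client sketch of this interface (illustrative only, not part of the commit; Node, nodecmp, and example are hypothetical names): a caller embeds Avl as the first member of its own node type, so an Avl* can be cast back to the node, and passes mkavltree a comparison function over two Avl pointers.

#include "lib9.h"
#include "avl.h"

typedef struct Node Node;
struct Node
{
	Avl	avl;		/* must be first so an Avl* is also a Node* */
	int	key;
};

static int
nodecmp(Avl *a, Avl *b)
{
	return ((Node*)a)->key - ((Node*)b)->key;
}

static void
example(void)
{
	Avltree *t;
	Node *n, probe;
	Avl *old, *hit;

	t = mkavltree(nodecmp);
	if(t == nil)
		return;

	n = malloc(sizeof *n);
	if(n == nil)
		return;
	n->key = 42;
	insertavl(t, &n->avl, &old);	/* *oldp receives any replaced node with an equal key */

	probe.key = 42;
	hit = lookupavl(t, &probe.avl);
	if(hit != nil)
		print("found %d\n", ((Node*)hit)->key);

	deleteavl(t, &probe.avl, &old);	/* *oldp receives the removed node; the caller owns it */
	free(old);
}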
=======================================
--- /dev/null
+++ /libavl/avl.c Sat Feb 6 08:24:58 2010
@@ -0,0 +1,413 @@
+#include "lib9.h"
+#include "bio.h"
+#include "avl.h"
+
+/*
+ * In-memory database stored as self-balancing AVL tree.
+ * See Lewis & Denenberg, Data Structures and Their Algorithms.
+ */
+
+static void
+singleleft(Avl **tp, Avl *p)
+{
+ int l, r2;
+ Avl *a, *c;
+
+ a = *tp;
+ c = a->n[1];
+
+ r2 = c->bal;
+ l = (r2 > 0? r2: 0)+1 - a->bal;
+
+ if((a->n[1] = c->n[0]) != nil)
+ a->n[1]->p = a;
+
+ if((c->n[0] = a) != nil)
+ c->n[0]->p = c;
+
+ if((*tp = c) != nil)
+ (*tp)->p = p;
+
+ a->bal = -l;
+ c->bal = r2 - ((l > 0? l: 0)+1);
+
+}
+
+static void
+singleright(Avl **tp, Avl *p)
+{
+ int l2, r;
+ Avl *a, *c;
+
+ a = *tp;
+ c = a->n[0];
+ l2 = - c->bal;
+ r = a->bal + ((l2 > 0? l2: 0)+1);
+
+ if((a->n[0] = c->n[1]) != nil)
+ a->n[0]->p = a;
+
+ if((c->n[1] = a) != nil)
+ c->n[1]->p = c;
+
+ if((*tp = c) != nil)
+ (*tp)->p = p;
+
+ a->bal = r;
+ c->bal = ((r > 0? r: 0)+1) - l2;
+}
+
+static void
+doublerightleft(Avl **tp, Avl *p)
+{
+ singleright(&(*tp)->n[1], *tp);
+ singleleft(tp, p);
+}
+
+static void
+doubleleftright(Avl **tp, Avl *p)
+{
+ singleleft(&(*tp)->n[0], *tp);
+ singleright(tp, p);
+}
+
+static void
+balance(Avl **tp, Avl *p)
+{
+ switch((*tp)->bal){
+ case -2:
+ if((*tp)->n[0]->bal <= 0)
+ singleright(tp, p);
+ else if((*tp)->n[0]->bal == 1)
+ doubleleftright(tp, p);
+ else
+ assert(0);
+ break;
+
+ case 2:
+ if((*tp)->n[1]->bal >= 0)
+ singleleft(tp, p);
+ else if((*tp)->n[1]->bal == -1)
+ doublerightleft(tp, p);
+ else
+ assert(0);
+ break;
+ }
+}
+
+static int
+_insertavl(Avl **tp, Avl *p, Avl *r, int (*cmp)(Avl*,Avl*), Avl **rfree)
+{
+ int i, ob;
+
+ if(*tp == nil){
+ r->bal = 0;
+ r->n[0] = nil;
+ r->n[1] = nil;
+ r->p = p;
+ *tp = r;
+ return 1;
+ }
+ ob = (*tp)->bal;
+ if((i = cmp(r, *tp)) != 0){
+ (*tp)->bal += i * _insertavl(&(*tp)->n[(i+1)/2], *tp, r, cmp,
+ rfree);
+ balance(tp, p);
+ return ob == 0 && (*tp)->bal != 0;
+ }
+
+ /* install new entry */
+ *rfree = *tp; /* save old node for freeing */
+ *tp = r; /* insert new node */
+ **tp = **rfree; /* copy old node's Avl contents */
+ if(r->n[0]) /* fix node's children's parent pointers */
+ r->n[0]->p = r;
+ if(r->n[1])
+ r->n[1]->p = r;
+
+ return 0;
+}
+
+static Avl*
+_lookupavl(Avl *t, Avl *r, int (*cmp)(Avl*,Avl*))
+{
+ int i;
+ Avl *p;
+
+ p = nil;
+ while(t != nil){
+ assert(t->p == p);
+ if((i = cmp(r, t)) == 0)
+ return t;
+ p = t;
+ t = t->n[(i+1)/2];
+ }
+ return nil;
+}
+
+static int
+successor(Avl **tp, Avl *p, Avl **r)
+{
+ int ob;
+
+ if((*tp)->n[0] == nil){
+ *r = *tp;
+ *tp = (*r)->n[1];
+ if(*tp)
+ (*tp)->p = p;
+ return -1;
+ }
+ ob = (*tp)->bal;
+ (*tp)->bal -= successor(&(*tp)->n[0], *tp, r);
+ balance(tp, p);
+ return -(ob != 0 && (*tp)->bal == 0);
+}
+
+static int
+_deleteavl(Avl **tp, Avl *p, Avl *rx, int(*cmp)(Avl*,Avl*), Avl **del,
+ void (*predel)(Avl*, void*), void *arg)
+{
+ int i, ob;
+ Avl *r, *or;
+
+ if(*tp == nil)
+ return 0;
+
+ ob = (*tp)->bal;
+ if((i=cmp(rx, *tp)) != 0){
+ (*tp)->bal += i * _deleteavl(&(*tp)->n[(i+1)/2], *tp, rx, cmp,
+ del, predel, arg);
+ balance(tp, p);
+ return -(ob != 0 && (*tp)->bal == 0);
+ }
+
+ if(predel)
+ (*predel)(*tp, arg);
+
+ or = *tp;
+ if(or->n[i=0] == nil || or->n[i=1] == nil){
+ *tp = or->n[1-i];
+ if(*tp)
+ (*tp)->p = p;
+ *del = or;
+ return -1;
+ }
+
+ /* deleting node with two kids, find successor */
+ or->bal += successor(&or->n[1], or, &r);
+ r->bal = or->bal;
+ r->n[0] = or->n[0];
+ r->n[1] = or->n[1];
+ *tp = r;
+ (*tp)->p = p;
+ /* node has changed; fix children's parent pointers */
+ if(r->n[0])
+ r->n[0]->p = r;
+ if(r->n[1])
+ r->n[1]->p = r;
+ *del = or;
+ balance(tp, p);
+ return -(ob != 0 && (*tp)->bal == 0);
+}
+
+static void
+checkparents(Avl *a, Avl *p)
+{
+ if(a == nil)
+ return;
+ if(a->p != p)
+ print("bad parent\n");
+ checkparents(a->n[0], a);
+ checkparents(a->n[1], a);
+}
+
+struct Avltree
+{
+ Avl *root;
+ int (*cmp)(Avl*, Avl*);
+ Avlwalk *walks;
+};
+struct Avlwalk
+{
+ int started;
+ int moved;
+ Avlwalk *next;
+ Avltree *tree;
+ Avl *node;
+};
+
+Avltree*
+mkavltree(int (*cmp)(Avl*, Avl*))
+{
+ Avltree *t;
+
+ t = malloc(sizeof *t);
+ if(t == nil)
+ return nil;
+ memset(t, 0, sizeof *t);
+ t->cmp = cmp;
+ return t;
+}
+
+void
+insertavl(Avltree *t, Avl *new, Avl **oldp)
+{
+ *oldp = nil;
+ _insertavl(&t->root, nil, new, t->cmp, oldp);
+}
+
+Avl*
+lookupavl(Avltree *t, Avl *key)
+{
+ return _lookupavl(t->root, key, t->cmp);
+}
+
+static Avl*
+findpredecessor(Avl *a)
+{
+ if(a == nil)
+ return nil;
+
+ if(a->n[0] != nil){
+ /* predecessor is rightmost descendant of left child */
+ for(a = a->n[0]; a->n[1]; a = a->n[1])
+ ;
+ return a;
+ }else{
+ /* we're at a leaf, predecessor is a parent we enter from the right */
+ while(a->p && a->p->n[0] == a)
+ a = a->p;
+ return a->p;
+ }
+}
+
+static Avl*
+findsuccessor(Avl *a)
+{
+ if(a == nil)
+ return nil;
+
+ if(a->n[1] != nil){
+ /* successor is leftmost descendant of right child */
+ for(a = a->n[1]; a->n[0]; a = a->n[0])
+ ;
+ return a;
+ }else{
+ /* we're at a leaf, successor is a parent we enter from the left going up */
+ while(a->p && a->p->n[1] == a)
+ a = a->p;
+ return a->p;
+ }
+}
+
+static void
+walkdel(Avl *a, void *v)
+{
+ Avl *p;
+ Avlwalk *w;
+ Avltree *t;
+
+ if(a == nil)
+ return;
+
+ p = findpredecessor(a);
+ t = v;
+ for(w = t->walks; w; w = w->next){
+ if(w->node == a){
+ /* back pointer to predecessor; not perfect but adequate */
+ w->moved = 1;
+ w->node = p;
+ if(p == nil)
+ w->started = 0;
+ }
+ }
+}
+
+void
+deleteavl(Avltree *t, Avl *key, Avl **oldp)
+{
+ *oldp = nil;
+ _deleteavl(&t->root, nil, key, t->cmp, oldp, walkdel, t);
+}
+
+Avlwalk*
+avlwalk(Avltree *t)
+{
+ Avlwalk *w;
+
+ w = malloc(sizeof *w);
+ if(w == nil)
+ return nil;
+ memset(w, 0, sizeof *w);
+ w->tree = t;
+ w->next = t->walks;
+ t->walks = w;
+ return w;
+}
+
+Avl*
+avlnext(Avlwalk *w)
+{
+ Avl *a;
+
+ if(w->started==0){
+ for(a = w->tree->root; a && a->n[0]; a = a->n[0])
+ ;
+ w->node = a;
+ w->started = 1;
+ }else{
+ a = findsuccessor(w->node);
+ if(a == w->node)
+ abort();
+ w->node = a;
+ }
+ return w->node;
+}
+
+Avl*
+avlprev(Avlwalk *w)
+{
+ Avl *a;
+
+ if(w->started == 0){
+ for(a = w->tree->root; a && a->n[1]; a = a->n[1])
+ ;
+ w->node = a;
+ w->started = 1;
+ }else if(w->moved){
+ w->moved = 0;
+ return w->node;
+ }else{
+ a = findpredecessor(w->node);
+ if(a == w->node)
+ abort();
+ w->node = a;
+ }
+ return w->node;
+}
+
+void
+endwalk(Avlwalk *w)
+{
+ Avltree *t;
+ Avlwalk **l;
+
+ t = w->tree;
+ for(l = &t->walks; *l; l = &(*l)->next){
+ if(*l == w){
+ *l = w->next;
+ break;
+ }
+ }
+ free(w);
+}
+
+static void
+walkavl(Avl *t, void (*f)(Avl*, void*), void *v)
+{
+ if(t == nil)
+ return;
+ walkavl(t->n[0], f, v);
+ f(t, v);
+ walkavl(t->n[1], f, v);
+}
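
The Avlwalk routines above give ordered iteration over a tree and stay valid across deleteavl() calls made during the walk (walkdel repoints any walker sitting on the deleted node at that node's predecessor). A short hypothetical use, reusing the Node type from the earlier sketch (dumptree is an illustrative name, not part of the commit):

static void
dumptree(Avltree *t)
{
	Avlwalk *w;
	Avl *a;

	w = avlwalk(t);
	if(w == nil)
		return;
	while((a = avlnext(w)) != nil)	/* ascending key order; avlprev() walks the other way */
		print("%d\n", ((Node*)a)->key);
	endwalk(w);
}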
=======================================
--- /dev/null
+++ /libavl/mkfile Sat Feb 6 08:24:58 2010
@@ -0,0 +1,12 @@
+<../mkconfig
+
+LIB=libavl.a
+
+OFILES= \
+ avl.$O\
+
+HFILES=\
+ $ROOT/include/avl.h
+
+<$ROOT/mkfiles/mksyslib-$SHELLTYPE
+
=======================================
--- /emu/Linux/emu Fri Jan 29 02:36:15 2010
+++ /emu/Linux/emu Sat Feb 6 08:24:58 2010
@@ -25,6 +25,7 @@
mem

lib
+ avl
interp
tk
freetype

==============================================================================
Revision: 81e8f54f5e
Author: Venkatesh Srinivas <m...@acm.jhu.edu>
Date: Sun Feb 7 05:24:59 2010
Log: Snapshot of object cache; it is not yet finished, ready, or enabled.

Comments would be appreciated!
http://code.google.com/p/inferno-npe/source/detail?r=81e8f54f5e
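
For context while reading the diff below, a rough sketch of how a subsystem might use the interface this snapshot adds to fns.h; the Foo type, its constructor/destructor, and the wrapper functions are hypothetical and are not part of the commit:

#include "dat.h"
#include "fns.h"

typedef struct Foo Foo;
struct Foo
{
	Lock	l;
	int	refs;
};

static objcache *foocache;

static int
fooctor(void *obj, void *priv)
{
	USED(priv);
	memset(obj, 0, sizeof(Foo));	/* bring the raw buffer to its constructed state */
	return 0;
}

static void
foodtor(void *obj, void *priv)
{
	USED(obj);
	USED(priv);			/* nothing to tear down in this example */
}

static void
fooinit(void)
{
	/* objcache_init() is assumed to have been called once at emu startup */
	foocache = objcache_create("foo", sizeof(Foo), 0, fooctor, foodtor, nil);
}

static Foo*
fooalloc(void)
{
	return objcache_alloc(foocache, 0);	/* the flag argument is not yet interpreted in this snapshot */
}

static void
foofree(Foo *f)
{
	objcache_free(foocache, f);
}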

Added:
/emu/port/objcache.c
Modified:
/emu/port/dat.h
/emu/port/fns.h

=======================================
--- /dev/null
+++ /emu/port/objcache.c Sun Feb 7 05:24:59 2010
@@ -0,0 +1,339 @@
+// THIS IS AN INTERMEDIATE VERSION NOT YET READY FOR USE
+
+// Object Cache
+// The object cache is a type-specific allocator; each cache only
+// allocates buffers of the same size and runs a constructor on them
+// to place them in an initial state and a destructor to ready them for
+// release.
+//
+// The cache is organized into a series of magazines, arrays of pointers.
+// There is a series of 'links', each of which contains a loaded magazine and
+// the previous magazine; there is a central collection of magazines,
+// called the Depot.
+//
+// The design is strongly based on the Magazine & Depot layers of
+// Solaris's libumem.
+//
+// References:
+// Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
+// Arbitrary Resources. Bonwick, Adams, 2001.
+
+// TODO:
+// - Magazine size is currently fixed; we should resize mags when
+// contention on the depot lock exceeds a threshold, as in the Solaris
+// kernel design
+// - Depots do not currently release either empty or full magazines to
+// the backing allocator; this needs to be fixed
+// - Currently, every kproc uses the same set of magazines, links[0]; we
+// ought either hash on thread ID or give each kproc its own
+// magazine_link, as in the Solaris kernel
+// - Extend AVL-based check logic and add it to the core malloc/free; also
+// protect it with locks. Also, (in core free), if an object was allocated
+// from the objcache layer, we'd like to know the PC of the free-r who is
+// not conforming to protocol
+// ...
+// - Experiment with supporting general-purpose *malloc and image allocations
+// with this allocator.
+//
+
+// $Id: objcache.c,v 1.1 2010/02/06 16:33:42 me Exp me $
+
+#include "dat.h"
+#include "fns.h"
+#include "interp.h"
+#include "error.h"
+#include "avl.h"
+#include <assert.h>
+
+#define INIT_MAGS 4
+#define INIT_MAG_SIZE 32
+
+struct magazine {
+ int rounds; // Number of remaining buffers
+ struct magazine *next; // Linkage used on depot list only
+ void **mag_round; // Resizable array of buffers
+};
+
+struct magazine_link {
+ struct magazine_link *next;
+ Lock l;
+ int size;
+ struct magazine *loaded;
+ struct magazine *prev;
+};
+
+struct objcache {
+ char *name;
+ ulong size;
+ ulong align;
+ int (*ctor)(void *, void *);
+ void (*dtor)(void *, void *);
+ void *priv;
+
+ int allocs;
+ int frees;
+ int bytes_out;
+
+ struct magazine_link links[INIT_MAGS];
+
+ Lock depot_lock;
+ struct magazine *depot_full;
+ struct magazine *depot_empty;
+ int depot_lock_contention;
+};
+
+// xxx: needs locks
+static Avltree* checktree;
+
+struct malloc_addr {
+ Avl avl;
+ void *addr;
+ int type;
+};
+
+// malloc_addr.type
+enum {
+ VOID,
+ OBJCACHE_ALLOCATED_ALLOC,
+ OBJCACHE_ALLOCATED_FREE,
+ RAW_MALLOC_ALLOC,
+ RAW_MALLOC_FREE,
+ UNKNOWN
+};
+
+static int checkaddr(Avl *va, Avl *vb) {
+ struct malloc_addr *a = (struct malloc_addr *) va, *b = (struct malloc_addr *) vb;
+
+ return (char*) a->addr - (char *) b->addr;
+}
+
+void objcache_init(void) {
+ checktree = mkavltree(checkaddr);
+
+}
+
+objcache*
+objcache_create(char *name, ulong size, ulong align,
+ int (*ctor)(void *obj, void *priv),
+ void (*dtor)(void *obj, void *priv),
+ void *priv)
+{
+ objcache *cache = malloc(sizeof(objcache));
+ int i;
+
+ cache->name = name;
+ cache->size = size;
+ cache->align = align;
+ cache->ctor = ctor;
+ cache->dtor = dtor;
+ cache->priv = priv;
+
+ cache->allocs = cache->frees = cache->bytes_out = 0;
+
+ // xxx: Convert to variable number of magazine links, hash kproc ID to assign to
+ // magazine link
+
+ for (i = 0; i < INIT_MAGS; i++) {
+ cache->links[i].next = nil;
+ cache->links[i].loaded = nil;
+ cache->links[i].prev = nil;
+ cache->links[i].size = INIT_MAG_SIZE;
+ }
+
+ cache->depot_full = nil;
+ cache->depot_empty = nil;
+ cache->depot_lock_contention = 0;
+
+ return cache;
+}
+
+void*
+objcache_alloc(objcache *cache, int flag)
+{
+ void *obj = nil;
+ int first_time = 1;
+ struct malloc_addr *m_a_check = malloc(sizeof (struct malloc_addr));
+ Avl *old;
+
+ lock(&cache->links[0].l); do {
+
+ // xxx: needs thought about locks; these are per-cache stats
+ if (first_time == 1) {
+ cache->allocs++;
+ cache->bytes_out += cache->size;
+ first_time = 0;
+ }
+
+ // if the loaded magazine has rounds, allocate and return
+ if (cache->links[0].loaded && cache->links[0].loaded->rounds > 0) {
+ obj = cache->links[0].loaded->mag_round[--cache->links[0].loaded->rounds];
+ break;
+ }
+
+ // if the previous magazine is full, swap it with loaded and try again
+ if (cache->links[0].prev && cache->links[0].prev->rounds == cache->links[0].size) {
+ struct magazine *tmp = cache->links[0].prev;
+ cache->links[0].prev = cache->links[0].loaded;
+ cache->links[0].loaded = tmp;
+ continue;
+ }
+
+ // if the depot has any full magazines, return prev, move loaded to prev, load a full mag, retry
+
+ int nContended = canlock(&cache->depot_lock);
+ int retry_alloc = 0;
+ if (nContended == 0) {
+ lock (&cache->depot_lock);
+ cache->depot_lock_contention++;
+ } do {
+ if (cache->depot_full != nil) {
+ struct magazine *tmp = cache->depot_full;
+ if (tmp)
+ cache->depot_full = tmp->next;
+
+ // return prev
+ struct magazine *tmp2 = cache->links[0].prev;
+ if (tmp2) {
+ tmp2->next = cache->depot_empty;
+ cache->depot_empty = tmp2;
+ }
+
+ // move loaded to prev
+ cache->links[0].prev = cache->links[0].loaded;
+
+ // load the new mag
+ if (tmp)
+ cache->links[0].loaded = tmp;
+
+ // Retry the allocation, without depot lock held
+ retry_alloc = 1;
+ }
+ } while(0); unlock(&cache->depot_lock);
+ if (retry_alloc == 1)
+ continue;
+
+ } while(0); unlock(&cache->links[0].l);
+
+ // allocate an object from malloc, call the ctor
+ // xxx: extract size and ctor under cache locks
+ // xxx: objcache should check if old entry exists, should check ranges, not points
+ if (obj == nil) {
+ obj = malloc(cache->size);
+ if (cache->ctor)
+ cache->ctor(obj, cache->priv);
+ m_a_check->addr = obj;
+ m_a_check->type = RAW_MALLOC_ALLOC;
+ insertavl(checktree, &m_a_check->avl, &old);
+ } else {
+ m_a_check->addr = obj;
+ m_a_check->type = OBJCACHE_ALLOCATED_ALLOC;
+ insertavl(checktree, &m_a_check->avl, &old);
+ }
+
+ return obj;
+}
+
+void
+objcache_free(objcache *cache, void *p)
+{
+ int first_time = 1;
+ int do_free = 0;
+ struct malloc_addr a;
+ a.addr = p;
+
+ Avl *tgtx = lookupavl(checktree, &a.avl);
+ struct malloc_addr *tgt = (struct malloc_addr *) tgtx;
+ if (!(tgt->type == OBJCACHE_ALLOCATED_ALLOC || tgt->type == RAW_MALLOC_ALLOC))
+ printf("Warning! Addr mismatch on %x caller %x\n", p, __builtin_return_address(0));
+
+ lock(&cache->links[0].l); do {
+ // xxx: need to think of correct locking/atomics for these.
+ if (first_time == 1) {
+ cache->bytes_out -= cache->size;
+ cache->frees++;
+ first_time = 0;
+ }
+
+ assert(cache->bytes_out >= 0);
+
+ // if the loaded magazine isn't full insert and done
+ if (cache->links[0].loaded && cache->links[0].loaded->rounds < cache->links[0].size) {
+ cache->links[0].loaded->mag_round[cache->links[0].loaded->rounds++] = p;
+ break;
+ }
+
+ // if the prev magazine is empty, exchange and try again
+ if (cache->links[0].prev && cache->links[0].prev->rounds == 0) {
+ struct magazine *tmp = cache->links[0].prev;
+ cache->links[0].prev = cache->links[0].loaded;
+ cache->links[0].loaded = tmp;
+ continue;
+ }
+
+ // if the depot has any empty magazines, move previous to depot,
+ // move loaded to previous, load empty magazine, continue
+
+ int nContended = canlock(&cache->depot_lock);
+ int retry_free = 0;
+ if (nContended == 0) {
+ lock(&cache->depot_lock);
+ cache->depot_lock_contention++;
+ } do {
+ if (cache->depot_empty != nil) {
+ // move previous to depot_full list
+ int was_prev = 0;
+ if (cache->links[0].prev) {
+ cache->links[0].prev->next = cache->depot_full;
+ was_prev = 1;
+ }
+ // INVARIANT: if cache->links[0].prev was nil, we want to ensure that we are not
+ // throwing away the entire full set of our depot.
+ if (was_prev == 0)
+ assert(cache->depot_full == nil);
+ cache->depot_full = cache->links[0].prev;
+
+ // move loaded to previous
+ cache->links[0].prev = cache->links[0].loaded;
+
+ // load empty magazine
+ cache->links[0].loaded = cache->depot_empty;
+ cache->depot_empty = cache->links[0].loaded->next;
+ // we would like to be sure that the magazine we just loaded was actually empty.
+ assert(cache->links[0].loaded->rounds == 0);
+
+ // Retry free
+ retry_free = 1;
+ }
+ } while(0); unlock(&cache->depot_lock);
+ if (retry_free)
+ continue;
+
+ // malloc an empty magazine, put in the depot, continue;
+ // xxx: threshold for 'too many magazines'? is this a good
+ // time to release depot extras?
+ struct magazine *newmag = malloc(sizeof(struct magazine));
+ newmag->rounds = 0;
+ newmag->mag_round = malloc(sizeof(void *) * cache->links[0].size);
+ newmag->next = nil;
+ cache->depot_empty = newmag;
+ continue;
+
+ } while(0); unlock(&cache->links[0].l);
+
+ // return the buffer to the underlying allocator
+ // xxx: want ctor extracted under cache lock
+ if (do_free == 1) {
+ if (cache->dtor)
+ cache->dtor(p, cache->priv);
+ free(p);
+ }
+
+}
+
+void
+objcache_destroy(objcache *cache)
+{
+ // xxx: TODO.
+ // Do we *really* want this? Probably; at least checking the Mainmem
+ // pool on exit is a noble goal.
+}
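
To summarize the magazine logic for reviewers: once the locking, statistics, and AVL check-tree bookkeeping are stripped away, the allocation path above reduces to roughly the shape below (a simplification written for this mail, not code from the commit):

static void*
alloc_sketch(objcache *c)
{
	struct magazine_link *ml = &c->links[0];
	struct magazine *m;
	void *obj;

	for(;;){
		/* 1. fast path: the loaded magazine still holds cached buffers */
		if(ml->loaded != nil && ml->loaded->rounds > 0)
			return ml->loaded->mag_round[--ml->loaded->rounds];

		/* 2. the previous magazine is full: swap loaded/prev, retry */
		if(ml->prev != nil && ml->prev->rounds == ml->size){
			m = ml->prev;
			ml->prev = ml->loaded;
			ml->loaded = m;
			continue;
		}

		/* 3. the depot has a full magazine: retire prev to the empty list,
		   demote loaded to prev, load the full magazine, retry */
		if(c->depot_full != nil){
			m = c->depot_full;
			c->depot_full = m->next;
			if(ml->prev != nil){
				ml->prev->next = c->depot_empty;
				c->depot_empty = ml->prev;
			}
			ml->prev = ml->loaded;
			ml->loaded = m;
			continue;
		}

		/* 4. nothing cached anywhere: fall back to the backing allocator */
		break;
	}

	obj = malloc(c->size);
	if(obj != nil && c->ctor != nil)
		c->ctor(obj, c->priv);
	return obj;
}

The free path is the mirror image: push into the loaded magazine if it has room, swap with an empty prev, pull an empty magazine from the depot, or allocate a fresh empty magazine for the depot and retry.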
=======================================
--- /emu/port/dat.h Fri Dec 22 09:07:39 2006
+++ /emu/port/dat.h Sun Feb 7 05:24:59 2010
@@ -16,6 +16,7 @@
typedef struct Mnt Mnt;
typedef struct Mhead Mhead;
typedef struct Osenv Osenv;
+typedef struct objcache objcache;
typedef struct Pgrp Pgrp;
typedef struct Proc Proc;
typedef struct Queue Queue;
@@ -23,7 +24,6 @@
typedef struct Rendez Rendez;
typedef struct Rept Rept;
typedef struct Rootdata Rootdata;
-/*typedef struct RWlock RWlock;*/
typedef struct RWLock RWlock;
typedef struct Procs Procs;
typedef struct Signerkey Signerkey;
@@ -39,6 +39,7 @@

#pragma incomplete Queue
#pragma incomplete Mntrpc
+#pragma incomplete objcache

#include "fcall.h"

=======================================
--- /emu/port/fns.h Tue Aug 25 11:35:34 2009
+++ /emu/port/fns.h Sun Feb 7 05:24:59 2010
@@ -200,6 +200,14 @@
void vmachine(void*);
int walk(Chan**, char**, int, int, int*);
void cleanexit(int);
+void objcache_init(void);
+objcache* objcache_create(char *, ulong, ulong,
+ int (*)(void*, void*),
+ void (*)(void *, void *),
+ void *);
+void* objcache_alloc(objcache*, int);
+void objcache_free(objcache*, void *);
+void objcache_destroy(objcache*);
void oshostintr(Proc*);
void osenter(void);
void osleave(void);
