http://groups.google.com/group/comp.programming.threads/msg/bb637f69d079ffba
________________________________________________________________________
/* Simple Macro Support
_____________________________________________________________________*/
/* PLACE/PLACE_X: one level of indirection so the argument is fully
macro-expanded before being placed. */
#define PLACE_X(t)t
#define PLACE(t)PLACE_X(t)
/* COMMA(): a function-like macro that yields a literal ','; lets a comma
be passed through other macros without splitting their argument lists. */
#define COMMA() ,
/* CONCAT/CONCAT_X: token-paste t1##t2 AFTER both arguments have expanded
(the extra layer defeats ##'s suppression of argument expansion). */
#define CONCAT_X(t1, t2)t1##t2
#define CONCAT(t1, t2)CONCAT_X(t1, t2)
/* INVOKE(t): expand to a call `t()` of the function-like macro `t`. */
#define INVOKE(t)PLACE(t)()
/* REPEAT_N(t, s): expand `t()` N times with `s()` emitted between each
pair (N - 1 separators). Built by doubling: N = 2*(N/2) items with one
extra separator in the middle. */
#define REPEAT_1(t)INVOKE(t)
#define REPEAT_2(t, s)REPEAT_1(t)INVOKE(s)REPEAT_1(t)
#define REPEAT_4(t, s)REPEAT_2(t, s)INVOKE(s)REPEAT_2(t, s)
#define REPEAT_8(t, s)REPEAT_4(t, s)INVOKE(s)REPEAT_4(t, s)
#define REPEAT_16(t, s)REPEAT_8(t, s)INVOKE(s)REPEAT_8(t, s)
#define REPEAT_32(t, s)REPEAT_16(t, s)INVOKE(s)REPEAT_16(t, s)
#define REPEAT_64(t, s)REPEAT_32(t, s)INVOKE(s)REPEAT_32(t, s)
#define REPEAT_128(t, s)REPEAT_64(t, s)INVOKE(s)REPEAT_64(t, s)
#define REPEAT_256(t, s)REPEAT_128(t, s)INVOKE(s)REPEAT_128(t, s)
#define REPEAT_512(t, s)REPEAT_256(t, s)INVOKE(s)REPEAT_256(t, s)
#define REPEAT_1024(t, s)REPEAT_512(t, s)INVOKE(s)REPEAT_512(t, s)
#define REPEAT_2048(t, s)REPEAT_1024(t, s)INVOKE(s)REPEAT_1024(t, s)
/* REPEAT(t, s, d): dispatch to REPEAT_<d>; `d` must be one of the
power-of-two counts defined above. */
#define REPEAT(t, s, d)CONCAT(REPEAT_, d)(t, s)
/* ARRAY_DEPTH: element count of a true array (NOT valid on a pointer or
a decayed array parameter). */
#define ARRAY_DEPTH(mp_this) ( \
sizeof(mp_this) / sizeof((mp_this)[0]) \
)
/* Simple Hashed Read/Write Locking
_____________________________________________________________________*/
#define _XOPEN_SOURCE 600
#include <pthread.h>
#include <stddef.h>
#include <assert.h>
#include <stdio.h>
/* Hash a pointer into an index in [0, mp_depth): multiply by a small prime,
discard the low 8 bits (alignment noise makes them poor discriminators),
then mask. NOTE: the mask only yields a valid index when mp_depth is a
power of two — which RWHASH_TABLE_DEPTH must be anyway for REPEAT(). */
#define RWHASH_PTR(mp_ptr, mp_depth) ( \
((((size_t)(mp_ptr)) * 307U) >> 8U) & (((size_t)(mp_depth)) - 1U) \
)
/* Max locks in one lock set (size of the per-set node pool). */
#if ! defined(RWHASH_SET_DEPTH)
# define RWHASH_SET_DEPTH 32
#endif
/* Number of rwlocks in the global hash table; must be a power of two and
one of the counts REPEAT() supports (1..2048). */
#if ! defined(RWHASH_TABLE_DEPTH)
# define RWHASH_TABLE_DEPTH 512
#endif
#define RWHASH_TABLE_PLACE() PTHREAD_RWLOCK_INITIALIZER
/* The global lock table: RWHASH_TABLE_DEPTH statically initialized rwlocks,
generated as `INIT, INIT, ...` by the REPEAT machinery above. */
static pthread_rwlock_t g_rwhash_sys_table[] = {
REPEAT(RWHASH_TABLE_PLACE, COMMA, RWHASH_TABLE_DEPTH)
};
/* One entry of a singly linked lock set; `rw` points into
g_rwhash_sys_table. */
struct rwhash_node {
struct rwhash_node* next;
pthread_rwlock_t* rw;
};
/* A lock-set accumulator. `acquire` holds pending locks sorted by ascending
lock address; `release` holds currently locked entries (built in reverse,
so descending). The struct itself is not internally synchronized —
NOTE(review): appears intended for use by one thread at a time; confirm. */
struct rwhash {
struct rwhash_node* acquire;
struct rwhash_node* release;
size_t offset; /* number of `nodes` entries currently in use */
struct rwhash_node nodes[RWHASH_SET_DEPTH]; /* fixed node pool */
};
/* Empty set: first member NULL, remaining members zero-initialized per
C aggregate-initialization rules. */
#define RWHASH_INITIALIZER { NULL }
static struct rwhash_node*
rwhash_sys_pop_node(
struct rwhash* const self,
pthread_rwlock_t* const rw
) {
    /* Take the next unused node from the embedded pool and bind it to `rw`.
       Returns NULL once all RWHASH_SET_DEPTH nodes are in use. */
    size_t const used = self->offset;
    if (used >= RWHASH_SET_DEPTH) {
        return NULL;
    }
    struct rwhash_node* const node = &self->nodes[used];
    node->rw = rw;
    self->offset = used + 1;
    return node;
}
static void
rwhash_sys_push_last_node(
struct rwhash* const self
) {
    /* Hand the most recently popped node back to the pool. */
    self->offset -= 1;
}
static void
rwhash_sys_flush_nodes(
struct rwhash* const self
) {
    /* Return every node to the pool in one step. */
    self->offset = 0;
}
static void
rwhash_reset(
struct rwhash* const self
) {
    /* Discard all bookkeeping: empty both lists and the node pool. This
       releases no rwlocks — only the local lists are cleared. */
    self->acquire = NULL;
    self->release = NULL;
    rwhash_sys_flush_nodes(self);
}
static int
rwhash_push(
struct rwhash* const self,
void const* const ptr
) {
struct rwhash_node* const node =
rwhash_sys_pop_node(self, g_rwhash_sys_table +
RWHASH_PTR(ptr, ARRAY_DEPTH(g_rwhash_sys_table)));
if (node) {
struct rwhash_node* prev = NULL;
struct rwhash_node* cur = self->acquire;
if (! cur) {
node->next = NULL;
self->acquire = node;
return 0;
}
while (cur) {
if (cur->rw > node->rw) {
if (! prev) {
node->next = cur;
self->acquire = node;
} else {
node->next = prev->next;
prev->next = node;
}
return 0;
} else if (cur->rw == node->rw) {
rwhash_sys_push_last_node(self);
return 0;
}
prev = cur;
cur = cur->next;
}
prev->next = node;
node->next = NULL;
return 0;
}
return -1;
}
static int
rwhash_wrlock(
struct rwhash* const self
) {
    /* Write-lock every pending lock in ascending address order, moving each
       node onto the release list (which therefore ends up in descending
       order). Always returns 0; per the author, pthread error checking is
       deliberately omitted here. */
    struct rwhash_node* n = self->acquire;
    self->acquire = NULL;
    while (n) {
        struct rwhash_node* const after = n->next;
        pthread_rwlock_wrlock(n->rw);
        printf("wrlock %p\n", (void*)n->rw);
        assert(! after || n->rw < after->rw); /* list must stay sorted */
        n->next = self->release;
        self->release = n;
        n = after;
    }
    return 0;
}
static int
rwhash_rdlock(
struct rwhash* const self
) {
    /* Read-lock every pending lock in ascending address order, moving each
       node onto the release list (which therefore ends up in descending
       order). Always returns 0; per the author, pthread error checking is
       deliberately omitted here. */
    struct rwhash_node* n = self->acquire;
    self->acquire = NULL;
    while (n) {
        struct rwhash_node* const after = n->next;
        pthread_rwlock_rdlock(n->rw);
        printf("rdlock %p\n", (void*)n->rw);
        assert(! after || n->rw < after->rw); /* list must stay sorted */
        n->next = self->release;
        self->release = n;
        n = after;
    }
    return 0;
}
static int
rwhash_unlock(
struct rwhash* const self
) {
    /* Unlock every held lock (release list is in descending address order)
       and move the nodes back onto the acquire list, restoring ascending
       order so the same set can be re-locked. Always returns 0. */
    struct rwhash_node* n = self->release;
    self->release = NULL;
    while (n) {
        struct rwhash_node* const after = n->next;
        printf("unlock %p\n", (void*)n->rw);
        assert(! after || n->rw > after->rw); /* descending while held */
        pthread_rwlock_unlock(n->rw);
        n->next = self->acquire;
        self->acquire = n;
        n = after;
    }
    return 0;
}
/* Simple Usage Example
_____________________________________________________________________*/
#include <stdio.h> /* NOTE(review): already included above; harmless duplicate */
/* Demo objects whose addresses are hashed into the global lock table. */
static unsigned g_count[] = { 1, 2, 3, 4, 5, 6, 7 };
int main(void) {
    /* Drives the lock-set through four scenarios; the push/lock/unlock call
       sequence (and thus the printed trace) matches the original demo. */
    struct rwhash rwhash = RWHASH_INITIALIZER;
    size_t const batch1[] = { 0, 3, 1, 2 };
    size_t const batch2[] = { 6, 4, 5 };
    size_t const batch3[] = { 6, 4, 5, 0, 3 };
    size_t const batch4[] = { 6, 4, 5, 0 };
    size_t const batch5[] = { 6, 0, 4 };
    size_t i;
    /* 1) Write-lock a set of four objects in one shot. */
    for (i = 0; i < ARRAY_DEPTH(batch1); ++i) {
        rwhash_push(&rwhash, &g_count[batch1[i]]);
    }
    rwhash_wrlock(&rwhash);
    /* write locked */
    rwhash_unlock(&rwhash);
    puts("------------------------------------");
    /* 2) Unlock put the nodes back on the acquire list, so these pushes
       EXTEND the previous set before write-locking again. */
    for (i = 0; i < ARRAY_DEPTH(batch2); ++i) {
        rwhash_push(&rwhash, &g_count[batch2[i]]);
    }
    rwhash_wrlock(&rwhash);
    /* write locked */
    rwhash_unlock(&rwhash);
    puts("------------------------------------");
    /* 3) Reset drops the carried-over set; build a fresh one and read-lock. */
    rwhash_reset(&rwhash);
    for (i = 0; i < ARRAY_DEPTH(batch3); ++i) {
        rwhash_push(&rwhash, &g_count[batch3[i]]);
    }
    rwhash_rdlock(&rwhash);
    /* read locked */
    rwhash_unlock(&rwhash);
    puts("------------------------------------");
    /* 4) Pushes may be abandoned mid-build via reset, then restarted. */
    for (i = 0; i < ARRAY_DEPTH(batch4); ++i) {
        rwhash_push(&rwhash, &g_count[batch4[i]]);
    }
    rwhash_reset(&rwhash);
    for (i = 0; i < ARRAY_DEPTH(batch5); ++i) {
        rwhash_push(&rwhash, &g_count[batch5[i]]);
    }
    rwhash_rdlock(&rwhash);
    /* read locked */
    rwhash_unlock(&rwhash);
    return 0;
}
________________________________________________________________________
This is portable to many systems with good support for POSIX Threads. It's
using a naive sorting mechanism, probably can be improved on quite a bit.
Anyway, it works and will allow one to atomically acquire write/read access
to N objects in a single shot. One can use it for a lock-based STM. It also
helps amortize the number of locks one needs to protect large shared
collections. It drastically helps reduce the chance of deadlock. It can simplify
complex locking schemes. No try-lock spinning.
Anyway, I think it might be helpful to somebody...
;^D
you can add any error checking you want to the
`pthread_rwlock_rd/wrlock/unlock()' functions. I omitted it because I could
not make up my mind how to handle failure cases within the middle of an
unlock set. If `pthread_rwlock_unlock()' fails in the middle of a set, should I
continue to unlock the set and return error, return error and leave half of
set locked, or lock the unlocked locks and return error with a complete
locked set. Humm...
It can easily support mixed locking... I am not sure whether it then
becomes more useful or not...
rwhash_push_write(&rwhash, &g_count[0]);
rwhash_push_read(&rwhash, &g_count[3]);
rwhash_push_write(&rwhash, &g_count[1]);
rwhash_push_read(&rwhash, &g_count[2]);
rwhash_just_lock(&rwhash);
/* mixed locked */
rwhash_unlock(&rwhash);
What do you think?
Dmitriy V'jukov
> It can easily support mixed locking... I am not sure whether it then
> become more useful or not...
> rwhash_push_write(&rwhash, &g_count[0]);
> rwhash_push_read(&rwhash, &g_count[3]);
> rwhash_push_write(&rwhash, &g_count[1]);
> rwhash_push_read(&rwhash, &g_count[2]);
> rwhash_just_lock(&rwhash);
> /* mixed locked */
> rwhash_unlock(&rwhash);
> What do you think?
Humm. Well, it might make hash collisions a bit odd. What to do about:
rwhash_push_read(&rwhash, &g_count[0]);
rwhash_push_write(&rwhash, &g_count[3]);
if the pointers `&g_count[0]' and `&g_count[3]' hash to the same value?
Would the lock set be read or write access? I can reason about fixing this
issue with access priority. For instance, if read access for addr X was
pushed onto the list, and a subsequent write access is also for addr X,
well, prior read would be converted into the subsequent write. That way,
hash collision will result in strongest synchronization guarantee; write >
read! This is important. You don't want code to assume it gets write access
when a read hash collision already exists in the lock set. Write hash needs
to be able to supersede and replace the read hash collision.
Does that make sense to you?