Fwiw, this is just a little experiment. I got some free time and decided
to go back into lock-free a bit. This code, in Relacy:
https://github.com/dvyukov/relacy
is basically C++11, so it's easy to convert to a standalone C++11 program.
I like to code up in Relacy to run things through its race detector.
Anyway, this early stage does not use any locks to insert new nodes into
random places within a LIFO. However, it does not destroy any nodes
during the test. The fun part is that I can add lock-free delete using
something like RCU, or a proxy collector in pure C++11 to allow for safe
and dynamic deallocation of nodes. The meat is in struct
ct_strange_stack. Humm... I think I might be able to get rid of one of
the acquire membars in:
ct_strange_stack::iterate
ct_strange_stack::find_random_node
ct_strange_stack::dump
Humm... Anyway, here is the code:
________________________________________
/*
Lock-Free Insert Experiment
by: Chris M. Thomasson
_____________________________________________________*/
//#define RL_DEBUGBREAK_ON_ASSERT
//#define RL_MSVC_OUTPUT
//#define RL_FORCE_SEQ_CST
//#define RL_GC
#include <relacy/relacy_std.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
#define CT_READERS 4
#define CT_WRITERS 3
#define CT_THREADS (CT_WRITERS + CT_READERS)
struct ct_strange_stack
{
    struct node
    {
        std::atomic<node*> m_next;
    };

    std::atomic<node*> m_head;

    ct_strange_stack() : m_head(nullptr) {}

    ~ct_strange_stack()
    {
        dump(); // finally, deallocate... ;^)
    }

    // Push n onto the main LIFO head.
    void push_head(node* n)
    {
        push_node(m_head, n);
    }

    // Lock-free insert of n at an arbitrary link: `head` may be m_head or
    // some node's m_next, allowing insertion at a random place in the LIFO.
    // BUGFIX: the original body loaded and CASed m_head instead of the
    // `head` parameter, so "insert after a random node" silently
    // degenerated into a plain push onto m_head.
    void push_node(std::atomic<node*>& head, node* n)
    {
        node* cmp = head.load(std::memory_order_relaxed);
        do
        {
            // Relaxed store is fine: the successful CAS below publishes
            // n (and this link) with release semantics.
            n->m_next.store(cmp, std::memory_order_relaxed);
        } while (!head.compare_exchange_weak(
            cmp,
            n,
            std::memory_order_release,   // success: publish n to readers
            std::memory_order_relaxed)); // failure: just retry with new cmp
    }

    // Walk the list; acquire loads pair with the release CAS in push_node
    // so every reached node's m_next is fully initialized.
    void iterate()
    {
        node* head = m_head.load(std::memory_order_acquire);
        while (head)
        {
            node* next = head->m_next.load(std::memory_order_acquire);
            rl::yield(1, $); // give the Relacy scheduler a chance to interleave
            head = next;
        }
    }

    // Walk the list and return a randomly chosen node, or nullptr if the
    // list is empty (or the coin flip never picked one before the end).
    node* find_random_node()
    {
        node* head = m_head.load(std::memory_order_acquire);
        while (head)
        {
            node* next = head->m_next.load(std::memory_order_acquire);
            unsigned long humm = rl::rand(2);
            if (humm == 1)
            {
                return head;
            }
            head = next;
        }
        return nullptr;
    }

    // Detach the whole list and delete every node. Only safe once no other
    // thread can touch the nodes; here it is called from the destructor
    // after the test threads have joined.
    void dump()
    {
        node* head = m_head.exchange(nullptr, std::memory_order_acquire);
        while (head)
        {
            node* next = head->m_next.load(std::memory_order_acquire);
            delete head;
            head = next;
        }
    }
};
struct ct_experiment
    : rl::test_suite<ct_experiment, CT_THREADS>
{
    ct_strange_stack g_stack;

    // Reader thread: just traverse the stack while writers mutate it.
    void reader(unsigned int tidx)
    {
        g_stack.iterate();
    }

    // Writer thread: seed the stack with a few nodes, then splice extra
    // nodes in at randomly chosen links.
    void writer(unsigned int tidx)
    {
        // Seed with three nodes pushed at the head.
        for (unsigned long seeded = 0; seeded < 3; ++seeded)
        {
            g_stack.push_head(new ct_strange_stack::node());
        }

        // Insert up to two more nodes, each linked after a random node.
        unsigned long budget = 3;
        for (;;)
        {
            ct_strange_stack::node* target = g_stack.find_random_node();
            if (!target || --budget == 0)
            {
                break;
            }
            g_stack.push_node(target->m_next, new ct_strange_stack::node());
        }
    }

    // Relacy entry point: the first CT_WRITERS thread indices write, the
    // remaining CT_READERS indices read.
    void thread(unsigned int tidx)
    {
        if (tidx >= CT_WRITERS)
        {
            reader(tidx);
            return;
        }
        writer(tidx);
    }
};
// Test away... Or fly? Humm...
int main()
{
{
rl::test_params p;
p.iteration_count = 10000000;
//p.execution_depth_limit = 33333;
//p.search_type = rl::sched_bound;
//p.search_type = rl::fair_full_search_scheduler_type;
//p.search_type = rl::fair_context_bound_scheduler_type;
rl::simulate<ct_experiment>(p);
}
return 0;
}
________________________________________