On 10/31/2019 10:43 AM, winden wrote:
> I recall hash locking was used in Solaris too, so maybe it came before that?
>
It had to. Fwiw, I was just messing around with some locking code and
said to myself: why not simply hash the address of an object into a
static table of locks that never die? Well, this can be used to
implement a lot of things, e.g. the C++ atomics themselves for the case
where every one of the following lock-free macros is defined to zero:
https://en.cppreference.com/w/cpp/atomic/atomic_is_lock_free
This is nice because an object does not need to carry a lock of its
own; its address simply hashes into one. Also, these locks stay around
and never perish.
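For instance, an atomic-style load/store for that not-lock-free case
can be faked with something along these lines. Just a rough sketch: the
table size and the hash_lock / locked_load / locked_store names are
made up for illustration, not taken from any real implementation:
_________________
// rough sketch: emulate atomic load/store with a static table of hashed locks
#include <mutex>
#include <cstddef>
#include <cstdint>

static std::mutex g_lock_table[64]; // static locks that live for the whole program

static std::mutex& hash_lock(void const* ptr)
{
    std::uintptr_t p = (std::uintptr_t)ptr;
    return g_lock_table[(std::size_t)((p >> 4) % 64)];
}

template<typename T>
T locked_load(T const& obj)
{
    std::lock_guard<std::mutex> guard(hash_lock(&obj));
    return obj;
}

template<typename T>
void locked_store(T& obj, T val)
{
    std::lock_guard<std::mutex> guard(hash_lock(&obj));
    obj = val;
}
_________________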
Well, my little idea cannot be new at all; it's so basic. Still, there
is a nice aspect: the experiment lets a thread take a whole series of
locks in a single deadlock-free operation, by removing duplicates and
sorting the thread's hashed lock indices:
_________________
// pseudo-code
mtx_table lock_table;

// some vars
int a = 3;
short b = 6;
double x = 1.234;

// a thread
{
    locker locks(lock_table); // our hashed locks
    locks.push(&a);
    locks.push(&x);
    locks.push(&b);

    locks.lock();
    // a, x, and b are all locked!
    locks.unlock();
}
_________________
Then again, sorting these thread-local locks cannot be new either: it
is just the classic lock-ordering discipline, where every thread ends
up acquiring the table's mutexes in the same global order. Here is a
C++11 program that should compile right up in GCC:
____________________________
/*
The Multex, simple deadlock free locking abstraction
By Chris M. Thomasson
____________________________________________________________*/
#include <iostream>
#include <functional>
#include <algorithm>
#include <mutex>
#include <thread>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#define THREADS 7
#define N 123456
// Allows one to lock multiple addresses
// at once, and never hit a deadlock.
// It is an experiment.
namespace ct_multex
{
    // A table of locks
    struct mutex_table
    {
        std::vector<std::mutex> m_locks;

        mutex_table(std::size_t size) : m_locks(size) { assert(size > 0); }

        // totally contrived simple hash...
        std::size_t hash(void const* ptr)
        {
            std::uintptr_t ptr_uint = (std::uintptr_t)ptr;
            return (std::size_t)(((ptr_uint << 9) * 103) % m_locks.size());
        }
    };
    // A thread's local indices into a table of locks
    struct local_locks
    {
        std::vector<std::size_t> m_lock_idxs;
        mutex_table& m_mtx_tbl;

        local_locks(mutex_table& mtx_tbl)
            : m_lock_idxs(), m_mtx_tbl(mtx_tbl) {}

        // Add an address to be locked
        void push_ptr(void const* ptr)
        {
            std::size_t idx = m_mtx_tbl.hash(ptr);
            m_lock_idxs.push_back(idx);
        }

        // Deadlock free, baby! ;^)
        void ensure_locking_order()
        {
            // sort and remove duplicates...
            std::sort(m_lock_idxs.begin(), m_lock_idxs.end());
            m_lock_idxs.erase(std::unique(m_lock_idxs.begin(), m_lock_idxs.end()),
                              m_lock_idxs.end());
        }

        // Take all of the locks, always in sorted index order
        void lock()
        {
            // a flag could be used to avoid re-sorting every time...
            ensure_locking_order();
            std::size_t n = m_lock_idxs.size();
            for (std::size_t i = 0; i < n; ++i)
            {
                m_mtx_tbl.m_locks[m_lock_idxs[i]].lock();
            }
        }

        // Unlock everything, in reverse order
        void unlock()
        {
            std::size_t n = m_lock_idxs.size();
            for (std::size_t i = 0; i < n; ++i)
            {
                m_mtx_tbl.m_locks[m_lock_idxs[n - i - 1]].unlock();
            }
        }
    };
    // RAII scoped lock: allows a thread to actually take the locks.
    // It locks a thread's local lock indices.
    struct scoped_lock
    {
        local_locks& m_locks;

        scoped_lock(local_locks& locks) : m_locks(locks)
        {
            m_locks.lock();
        }

        ~scoped_lock() throw()
        {
            m_locks.unlock();
        }
    };
}
// Test program...
//______________________________________

// Shared data
struct ct_shared
{
    ct_multex::mutex_table& m_multex;

    ct_shared(ct_multex::mutex_table& multex)
        : m_multex(multex), data_0(1), data_1(2), data_2(3), data_3(4) { }

    unsigned long data_0;
    unsigned long data_1;
    unsigned long data_2;
    unsigned long data_3;
};
// a thread
void ct_thread(ct_shared& shared)
{
    // Create our local locks
    ct_multex::local_locks locks(shared.m_multex);

    // Add some addresses
    locks.push_ptr(&shared.data_2);
    locks.push_ptr(&shared.data_0);

    // Do some work
    for (unsigned long i = 0; i < N / 2; ++i)
    {
        {
            ct_multex::scoped_lock slock(locks);
            // locked for data_0 and data_2
            shared.data_0 += i;
            shared.data_2 += i;
        }

        std::this_thread::yield();

        {
            ct_multex::scoped_lock slock(locks);
            // locked for data_0 and data_2
            shared.data_0 -= i;
            std::this_thread::yield(); // for fun...
            shared.data_2 -= i;
        }
    }

    // Add some other addresses
    locks.push_ptr(&shared.data_1);
    locks.push_ptr(&shared.data_3);

    // Do some more work...
    for (unsigned long i = 0; i < N / 2; ++i)
    {
        {
            ct_multex::scoped_lock slock(locks);
            // locked for data_0, data_1, data_2 and data_3
            shared.data_0 += i;
            std::this_thread::yield(); // for fun...
            shared.data_1 += i;
            shared.data_2 += i;
            shared.data_3 += i;
        }

        std::this_thread::yield();

        {
            ct_multex::scoped_lock slock(locks);
            // locked for data_0, data_1, data_2 and data_3
            shared.data_0 -= i;
            shared.data_1 -= i;
            shared.data_2 -= i;
            std::this_thread::yield(); // for fun...
            shared.data_3 -= i;
        }
    }
}
int main(void)
{
    {
        // Our mutex table
        ct_multex::mutex_table multex_tbl(42);

        // Our shared data
        ct_shared shared(multex_tbl);

        // Launch...
        {
            std::thread threads[THREADS];

            // Create threads...
            for (unsigned long i = 0; i < THREADS; ++i)
            {
                threads[i] = std::thread(ct_thread, std::ref(shared));
            }

            std::cout << "processing...\n\n";
            std::cout.flush();

            // Join threads...
            for (unsigned long i = 0; i < THREADS; ++i)
            {
                threads[i].join();
            }
        }

        // Verify the shared data...
        std::cout << "shared.data_0 = " << shared.data_0 << "\n";
        std::cout << "shared.data_1 = " << shared.data_1 << "\n";
        std::cout << "shared.data_2 = " << shared.data_2 << "\n";
        std::cout << "shared.data_3 = " << shared.data_3 << "\n";

        assert(shared.data_0 == 1);
        assert(shared.data_1 == 2);
        assert(shared.data_2 == 3);
        assert(shared.data_3 == 4);
    }

    std::cout << "\n\nfin!\n\n";

    return 0;
}
____________________________
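Fwiw, the pseudo-code at the top maps straight onto those classes. A
minimal usage sketch; the multex_usage_example name is just for
illustration, and it assumes the includes and the ct_multex namespace
from the program above are already in scope:
____________________________
// Minimal usage sketch of ct_multex; assumes the includes and the
// ct_multex namespace from the program above are visible here.
void multex_usage_example()
{
    ct_multex::mutex_table lock_table(42); // the shared table of hashed locks

    int a = 3;
    short b = 6;
    double x = 1.234;

    ct_multex::local_locks locks(lock_table); // our hashed lock indices
    locks.push_ptr(&a);
    locks.push_ptr(&x);
    locks.push_ptr(&b);

    {
        ct_multex::scoped_lock slock(locks);
        // a, x, and b are all locked here...
        a += 1;
        b += 1;
        x += 1.0;
    }
    // ...and unlocked again here.

    assert(a == 4);
}
____________________________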