I've developed such a rw-mutex on my own, with the calculation of the spinning interval taken from glibc.
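In short, the adaptive scheme spins up to twice the recent average plus a small floor (capped at a configured maximum) and afterwards folds the observed spin count into the average with weight 1/8. A minimal standalone sketch of just that calculation, with illustrative names (not part of the mutex itself):
// -------------------- adaptive-spin sketch (illustrative)
#include <algorithm>
#include <cstdint>
struct adaptive_spin
{
std::int16_t avg = 0; // running average of observed spin counts
std::int32_t budget( std::int16_t maxCount ) const
{
// up to twice the recent average plus a floor of 10, capped at the maximum
return std::min<std::int32_t>( (std::int32_t)avg * 2 + 10, maxCount );
}
void update( std::int32_t observed )
{
// exponential moving average with weight 1/8, as in glibc
avg += ((std::int16_t)observed - avg) / 8;
}
};
The full implementation follows: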
// -------------------- .h
#pragma once
#include <cstdint>
#include <cassert>
#include <thread>
#include <new>
#include <atomic>
#include <stdexcept>
#include <exception>
#include <limits>
#include <semaphore>
#include "thread_id.h"
#include "msvc-disabler.h"
#if defined(__llvm__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdangling-else"
#endif
struct wbsm_exception : public std::exception
{
enum reason : std::uint8_t
{
SHARER_COUNTER_SATURATED = 1,
WAITING_EXCLUSIVE_COUNTER_SATURATED,
RECURSION_COUNTER_SATURATED,
RECURSION_COUNTER_NOT_ZERO,
INVALID_THREAD_ID
};
wbsm_exception() = delete;
wbsm_exception( reason r, char const *what );
reason get_reason();
char const *what() const noexcept override;
private:
reason m_reason;
char const *m_what;
};
inline
wbsm_exception::wbsm_exception( reason r, char const *what ) :
m_reason( r ),
m_what( what )
{
}
inline
wbsm_exception::reason wbsm_exception::get_reason()
{
return m_reason;
}
static_assert(std::atomic<std::uint64_t>::is_always_lock_free,
"std::uint64_t must be lock-free");
struct alignas(64)
wbias_shared_mutex
{
wbias_shared_mutex( std::int16_t maxExclusiveSpinCount = 100,
std::int16_t maxSharedSpinCount = 100 );
wbias_shared_mutex( wbias_shared_mutex const & ) = delete;
void operator =( wbias_shared_mutex const & ) = delete;
~wbias_shared_mutex();
void lock_shared();
bool try_lock_shared();
void unlock_shared();
void shared_to_exclusive( thread_id const &threadId = thread_id::self() );
void lock_exclusive( thread_id const &threadId = thread_id::self() );
bool try_lock_exclusive( thread_id const &threadId = thread_id::self() );
bool lock_preferred_shared( thread_id const &threadId =
thread_id::self() );
void unlock_exclusive( thread_id const &threadId = thread_id::self() );
void exclusive_to_shared( bool force = false, thread_id const &threadId
= thread_id::self() );
bool is_shared();
bool is_exclusive();
bool we_are_exclusive( thread_id const &threadId = thread_id::self() );
thread_id get_exclusive_thread_id();
std::uint32_t get_exclusive_recursion_count();
std::int16_t max_exclusive_spin_count( std::int16_t max );
std::int16_t max_shared_spin_count( std::int16_t max );
private:
static_assert(std::atomic<std::uint64_t>::is_always_lock_free,
"std::atomic<std::uint64_t> must be always lock-free");
std::atomic<std::uint64_t> m_atomic;
thread_id m_exclusiveThreadId;
std::uint32_t m_exclusiveRecursionCount;
std::int16_t m_exclusiveSpinCount, m_sharedSpinCount; // adaptive spinning taken from glibc
std::int16_t m_maxExclusiveSpinCount, m_maxSharedSpinCount;
std::counting_semaphore<((unsigned)-1 >> 1)> m_releaseSharedSem;
std::binary_semaphore m_releaseExclusiveSem;
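// 64-bit state word layout: bits 0-20 sharer count, bits 21-41 waiting
// sharers, bits 42-62 waiting exclusive, bit 63 exclusive flag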
static unsigned const WAITING_SHARERS_BASE = 21, WAITING_EXCLUSIVE_BASE = 42, EXCLUSIVE_FLAG_BASE = 63;
static std::uint64_t const MASK21 = 0x1FFFFFu;
static std::uint64_t const SHARERS_MASK = MASK21;
static std::uint64_t const WAITING_SHARERS_MASK = MASK21 << WAITING_SHARERS_BASE;
static std::uint64_t const WAITING_EXCLUSIVE_MASK = MASK21 << WAITING_EXCLUSIVE_BASE;
static std::uint64_t const EXCLUSIVE_FLAG_MASK = (std::uint64_t)1 << EXCLUSIVE_FLAG_BASE;
static std::uint64_t const SHARER_VALUE = (std::uint64_t)1;
static std::uint64_t const WAITING_SHARERS_VALUE = (std::uint64_t)1 << WAITING_SHARERS_BASE;
static std::uint64_t const WAITING_EXCLUSIVE_VALUE = (std::uint64_t)1 << WAITING_EXCLUSIVE_BASE;
static short const DEFAULT_MAX_SPIN_COUNT = 100;
#if !defined(NDEBUG)
static bool check( std::uint64_t flags );
#endif
bool internal_spin_lock_shared( std::uint64_t &cmp );
void internal_lock_shared( std::uint64_t cmp );
bool internal_spin_lock_exclusive( std::uint64_t &cmp, thread_id const
&threadId );
void cpu_pause();
};
inline
bool wbias_shared_mutex::is_shared()
{
return m_atomic.load( std::memory_order_relaxed ) & SHARERS_MASK;
}
inline
bool wbias_shared_mutex::is_exclusive()
{
return m_atomic.load( std::memory_order_relaxed ) & EXCLUSIVE_FLAG_MASK;
}
inline
bool wbias_shared_mutex::we_are_exclusive( thread_id const &threadId )
{
return (m_atomic.load( std::memory_order_relaxed ) &
EXCLUSIVE_FLAG_MASK) && m_exclusiveThreadId == threadId;
}
inline
thread_id wbias_shared_mutex::get_exclusive_thread_id()
{
//std::atomic_thread_fence( std::memory_order_acquire );
return m_exclusiveThreadId;
}
inline
std::uint32_t wbias_shared_mutex::get_exclusive_recursion_count()
{
return m_exclusiveRecursionCount;
}
struct wbsm_lock
{
enum : std::uint8_t
{
UNLOCKED,
SHARED,
EXCLUSIVE
};
wbsm_lock();
wbsm_lock( wbsm_lock const &other );
wbsm_lock( wbsm_lock &&other ) noexcept;
wbsm_lock( wbias_shared_mutex &mtx, bool lockShared = true, thread_id
const &threadId = thread_id::self() );
~wbsm_lock();
wbsm_lock &operator =( wbsm_lock const &other );
wbsm_lock &operator =( wbsm_lock &&other ) noexcept;
void lock_shared( wbias_shared_mutex &mtx );
void shared_to_exclusive( thread_id const &threadId = thread_id::self() );
void exclusive_to_shared( bool force = false );
void lock_exclusive( wbias_shared_mutex &mtx, thread_id const &threadId
= thread_id::self() );
void unlock();
void lock_preferred_shared( wbias_shared_mutex &mtx, thread_id const
&threadId = thread_id::self() );
bool is_locked() const;
bool is_shared() const;
bool is_exclusive() const;
thread_id get_exclusive_thread_id() const;
wbias_shared_mutex *get_mutex() const;
std::uint8_t get_state() const;
void set_state( std::uint8_t state, wbias_shared_mutex &mtx );
private:
wbias_shared_mutex *m_mtx;
bool m_isShared;
thread_id m_threadId;
};
inline
wbsm_lock::wbsm_lock()
{
m_mtx = nullptr;
}
// may throw wbsm_exception if mutex-counter saturates
inline
wbsm_lock::wbsm_lock( wbsm_lock const &other ) :
m_mtx( other.m_mtx ),
m_isShared( other.m_isShared )
{
if( m_isShared )
m_mtx->lock_shared();
else
m_mtx->lock_exclusive( other.m_threadId ),
m_threadId = m_mtx->get_exclusive_thread_id();
}
inline
wbsm_lock::wbsm_lock( wbsm_lock &&other ) noexcept :
m_mtx( other.m_mtx ),
m_isShared( other.m_isShared ),
m_threadId( other.m_threadId )
{
other.m_mtx = nullptr;
#if !defined(NDEBUG)
other.m_threadId = thread_id();
#endif
}
// may throw wbsm_exception if mutex-counter saturates
inline
wbsm_lock::wbsm_lock( wbias_shared_mutex &mtx, bool lockShared,
thread_id const &threadId )
{
if( lockShared ) // [[likely]]
mtx.lock_shared();
else
mtx.lock_exclusive( threadId ),
m_threadId = mtx.get_exclusive_thread_id();
m_mtx = &mtx;
m_isShared = lockShared;
}
inline
wbsm_lock::~wbsm_lock()
{
if( !m_mtx )
return;
if( m_isShared ) [[likely]]
m_mtx->unlock_shared();
else
m_mtx->unlock_exclusive( m_threadId );
}
// may throw wbsm_exception if mutex-counter saturates
inline
wbsm_lock &wbsm_lock::operator =( wbsm_lock const &other )
{
if( m_mtx == other.m_mtx )
return *this;
unlock();
if( !other.m_mtx ) [[unlikely]]
return *this;
if( other.m_isShared )
other.m_mtx->lock_shared();
else
other.m_mtx->lock_exclusive( other.m_threadId ),
m_threadId = other.m_threadId;
m_mtx = other.m_mtx;
m_isShared = other.m_isShared;
return *this;
}
inline
wbsm_lock &wbsm_lock::operator =( wbsm_lock &&other ) noexcept
{
if( m_mtx == other.m_mtx )
{
other.unlock();
return *this;
}
unlock();
m_mtx = other.m_mtx;
m_isShared = other.m_isShared;
m_threadId = other.m_threadId;
other.m_mtx = nullptr;
#if !defined(NDEBUG)
other.m_threadId = thread_id();
#endif
return *this;
}
// may throw wbsm_exception if mutex-counter saturates
inline
void wbsm_lock::lock_shared( wbias_shared_mutex &mtx )
{
if( m_mtx == &mtx )
if( m_isShared ) [[likely]]
return;
else
{
m_mtx->exclusive_to_shared( false, m_threadId );
m_isShared = true;
#if !defined(NDEBUG)
m_threadId = thread_id();
#endif
return;
}
unlock();
mtx.lock_shared();
m_mtx = &mtx;
m_isShared = true;
#if !defined(NDEBUG)
m_threadId = thread_id();
#endif
}
// may throw wbsm_exception if mutex-counter saturates
inline
void wbsm_lock::shared_to_exclusive( thread_id const &threadId )
{
assert(m_mtx);
if( m_isShared )
m_mtx->shared_to_exclusive( threadId ),
m_isShared = false;
m_threadId = threadId;
}
// may throw wbsm_exception if mutex-counter saturates
inline
void wbsm_lock::exclusive_to_shared( bool force )
{
assert(m_mtx);
if( m_isShared )
return;
m_mtx->exclusive_to_shared( force, m_threadId );
m_isShared = true;
#if !defined(NDEBUG)
m_threadId = thread_id();
#endif
}
// may throw wbsm_exception if mutex-counter saturates
inline
void wbsm_lock::lock_exclusive( wbias_shared_mutex &mtx, thread_id const
&threadId )
{
if( m_mtx == &mtx )
if( !m_isShared )
return;
else
{
m_mtx->shared_to_exclusive( threadId );
m_isShared = false;
m_threadId = threadId;
return;
}
unlock();
mtx.lock_exclusive( threadId );
m_mtx = &mtx;
m_isShared = false;
m_threadId = threadId;
}
inline
void wbsm_lock::unlock()
{
if( !m_mtx ) [[unlikely]]
return;
if( m_isShared ) [[likely]]
m_mtx->unlock_shared();
else
m_mtx->unlock_exclusive( m_threadId );
m_mtx = nullptr;
}
// may throw wbsm_exception if mutex-counter saturates
inline
void wbsm_lock::lock_preferred_shared( wbias_shared_mutex &mtx,
thread_id const &threadId )
{
if( m_mtx != &mtx )
m_isShared = mtx.lock_preferred_shared( threadId ),
m_mtx = &mtx;
m_threadId = threadId;
}
inline
bool wbsm_lock::is_locked() const
{
return m_mtx != nullptr;
}
inline
bool wbsm_lock::is_shared() const
{
assert(m_mtx);
return m_isShared;
}
inline
bool wbsm_lock::is_exclusive() const
{
assert(m_mtx);
return !m_isShared;
}
inline
thread_id wbsm_lock::get_exclusive_thread_id() const
{
return m_threadId;
}
inline
std::uint8_t wbsm_lock::get_state() const
{
if( !m_mtx ) [[unlikely]]
return wbsm_lock::UNLOCKED;
if( m_isShared ) [[likely]]
return wbsm_lock::SHARED;
else
return wbsm_lock::EXCLUSIVE;
}
inline
wbias_shared_mutex *wbsm_lock::get_mutex() const
{
return m_mtx;
}
#if defined(__llvm__)
#pragma clang diagnostic pop
#endif
// -------------------- .cpp
#include <utility>
#include "wbias_shared_mutex.h"
#include "xassert.h"
#include "exc_encapsulate.h"
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
#include <immintrin.h>
#endif
#if defined(__llvm__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdangling-else"
#endif
char const *wbsm_exception::what() const noexcept
{
return m_what;
}
#if !defined(NDEBUG)
inline
bool wbias_shared_mutex::check( std::uint64_t flags )
{
unsigned
sharers = (unsigned)(flags & MASK21),
waitingExclusive = (unsigned)((flags >> WAITING_EXCLUSIVE_BASE) & MASK21),
exclusiveFlag = (unsigned)((flags >> EXCLUSIVE_FLAG_BASE) & 1);
if( sharers && exclusiveFlag )
return false;
if( waitingExclusive && !exclusiveFlag && !sharers )
return false;
return true;
}
#endif
wbias_shared_mutex::wbias_shared_mutex( std::int16_t
maxExclusiveSpinCount, std::int16_t maxSharedSpinCount ) :
m_atomic( 0 ),
m_exclusiveThreadId(),
m_exclusiveRecursionCount( 0 ),
m_exclusiveSpinCount( 0 ),
m_sharedSpinCount( 0 ),
m_maxExclusiveSpinCount( maxExclusiveSpinCount ),
m_maxSharedSpinCount( maxSharedSpinCount ),
m_releaseSharedSem( 0 ),
m_releaseExclusiveSem( 0 )
{
}
wbias_shared_mutex::~wbias_shared_mutex()
{
assert(m_atomic == 0);
}
// throws wbsm_exception if waiting sharers counter saturates
void wbias_shared_mutex::lock_shared()
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_relaxed );
if( internal_spin_lock_shared( cmp ) )
return;
internal_lock_shared( cmp );
}
inline
bool wbias_shared_mutex::internal_spin_lock_shared( std::uint64_t &cmp )
{
using namespace std;
if( m_maxSharedSpinCount <= 0 )
return false;
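// glibc-style spin budget: twice the recent average plus a floor of 10,
// capped at the configured maximum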
int32_t maxSpin = (int32_t)m_sharedSpinCount * 2 + 10;
maxSpin = maxSpin <= m_maxSharedSpinCount ? maxSpin : m_maxSharedSpinCount;
int32_t spinCount = 0;
bool locked = false;
do
{
assert(check( cmp ));
if( cmp & EXCLUSIVE_FLAG_MASK )
{
cpu_pause();
cmp = m_atomic.load( memory_order_relaxed );
continue;
}
if( (cmp & SHARERS_MASK) == SHARERS_MASK )
return false;
if( m_atomic.compare_exchange_weak( cmp, cmp + SHARER_VALUE,
memory_order_acquire, memory_order_relaxed ) )
{
locked = true;
break;
}
cpu_pause();
} while( ++spinCount < maxSpin );
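// fold the observed spin count into the running average with weight 1/8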
m_sharedSpinCount += ((int16_t)spinCount - m_sharedSpinCount) / 8;
return locked;
}
// throws wbsm_exception if waiting sharers counter saturates
inline
void wbias_shared_mutex::internal_lock_shared( std::uint64_t cmp )
{
using namespace std;
for( ; ; )
{
assert(check( cmp ));
if( (cmp & (EXCLUSIVE_FLAG_MASK | WAITING_EXCLUSIVE_MASK)) == 0 )
{
if( (cmp & SHARERS_MASK) == SHARERS_MASK )
throw wbsm_exception( wbsm_exception::SHARER_COUNTER_SATURATED,
"wbsm-lock sharer-counter saturated" );
if( m_atomic.compare_exchange_weak( cmp, cmp + SHARER_VALUE,
memory_order_acquire, memory_order_relaxed ) )
return;
}
else
{
if( (cmp & SHARERS_MASK) + ((cmp & WAITING_SHARERS_MASK) >>
WAITING_SHARERS_BASE) >= SHARERS_MASK )
throw wbsm_exception( wbsm_exception::SHARER_COUNTER_SATURATED,
"wbsm-lock sharer-counter saturated" );
if( m_atomic.compare_exchange_weak( cmp, cmp + WAITING_SHARERS_VALUE,
memory_order_relaxed, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseSharedSem.acquire(); } );
return;
}
}
}
}
// throws wbsm_exception if recursion-counter saturates
inline
bool wbias_shared_mutex::internal_spin_lock_exclusive( std::uint64_t
&cmp, thread_id const &threadId )
{
using namespace std;
assert(check( cmp ));
if( (cmp & EXCLUSIVE_FLAG_MASK) && m_exclusiveThreadId == threadId )
{
if( m_exclusiveRecursionCount == numeric_limits<uint32_t>::max() )
throw wbsm_exception( wbsm_exception::RECURSION_COUNTER_SATURATED,
"wbsm-lock recursion-counter saturated" );
++m_exclusiveRecursionCount;
return true;
}
if( m_maxExclusiveSpinCount <= 0 )
return false;
int32_t maxSpin = (int32_t)m_exclusiveSpinCount * 2 + 10;
maxSpin = maxSpin <= m_maxExclusiveSpinCount ? maxSpin :
m_maxExclusiveSpinCount;
int32_t spinCount = 0;
bool locked = false;
do
{
assert(check( cmp ));
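// exclusive ownership can only be taken when the whole state word is zero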
cmp = 0;
if( m_atomic.compare_exchange_weak( cmp, EXCLUSIVE_FLAG_MASK,
memory_order_acquire, memory_order_relaxed ) )
{
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
locked = true;
break;
}
cpu_pause();
} while( ++spinCount < maxSpin );
m_exclusiveSpinCount += ((int16_t)spinCount - m_exclusiveSpinCount) / 8;
return locked;
}
bool wbias_shared_mutex::try_lock_shared()
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_relaxed );
return internal_spin_lock_shared( cmp );
}
void wbias_shared_mutex::unlock_shared()
{
using namespace std;
for( uint64_t cmp = m_atomic.load( memory_order_relaxed ); ; )
{
assert(check( cmp ));
assert((cmp & SHARERS_MASK) >= SHARER_VALUE);
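// no writer waiting, or we are not the last sharer: just decrement;
// otherwise hand the lock directly to one waiting writer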
if( (cmp & WAITING_EXCLUSIVE_MASK) == 0 || (cmp & SHARERS_MASK) !=
SHARER_VALUE )
{
if( m_atomic.compare_exchange_weak( cmp, cmp - SHARER_VALUE,
memory_order_relaxed, memory_order_relaxed ) )
return;
}
else
{
assert(!(cmp & EXCLUSIVE_FLAG_MASK));
if( m_atomic.compare_exchange_weak( cmp, (cmp - SHARER_VALUE -
WAITING_EXCLUSIVE_VALUE) | EXCLUSIVE_FLAG_MASK, memory_order_relaxed,
memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseExclusiveSem.release( 1 ); } );
return;
}
}
}
}
// throws wbsm_exception if waiting exclusive counter saturates
void wbias_shared_mutex::shared_to_exclusive( thread_id const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_relaxed );
if( m_maxExclusiveSpinCount > 0 )
{
int32_t maxSpin = (int32_t)m_exclusiveSpinCount * 2 + 10;
maxSpin = maxSpin <= m_maxExclusiveSpinCount ? maxSpin :
m_maxExclusiveSpinCount;
int32_t spinCount = 0;
bool locked = false;
do
{
assert(check( cmp ));
assert((cmp & SHARERS_MASK) >= SHARER_VALUE);
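// while spinning for the upgrade, expect to be the only remaining sharer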
cmp = (cmp & ~SHARERS_MASK) | 1;
if( m_atomic.compare_exchange_weak( cmp, (cmp & ~SHARERS_MASK) |
EXCLUSIVE_FLAG_MASK, memory_order_acquire, memory_order_relaxed ) )
{
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
locked = true;
break;
}
cpu_pause();
} while( ++spinCount < maxSpin );
m_exclusiveSpinCount += ((int16_t)spinCount - m_exclusiveSpinCount) / 8;
if( locked )
return;
}
for( ; ; )
{
assert(check( cmp ));
assert((cmp & SHARERS_MASK) >= SHARER_VALUE);
if( (cmp & SHARERS_MASK) == SHARER_VALUE )
if( m_atomic.compare_exchange_weak( cmp, (cmp - SHARER_VALUE) |
EXCLUSIVE_FLAG_MASK, memory_order_acquire, memory_order_relaxed ) )
{
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
return;
}
else;
else
{
if( (cmp & WAITING_EXCLUSIVE_MASK) == WAITING_EXCLUSIVE_MASK )
throw wbsm_exception( wbsm_exception::WAITING_EXCLUSIVE_COUNTER_SATURATED,
"wbsm-lock waiting-exclusive-count saturated" );
if( m_atomic.compare_exchange_weak( cmp, cmp - SHARER_VALUE +
WAITING_EXCLUSIVE_VALUE, memory_order_relaxed, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseExclusiveSem.acquire(); } );
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
return;
}
}
}
}
// throws wbsm_exception if waiting exclusive counter saturates
void wbias_shared_mutex::lock_exclusive( thread_id const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_acquire );
if( internal_spin_lock_exclusive( cmp, threadId ) )
return;
for( ; ; )
{
assert(check( cmp ));
if( (cmp & (EXCLUSIVE_FLAG_MASK | SHARERS_MASK)) == 0 )
if( m_atomic.compare_exchange_weak( cmp, cmp | EXCLUSIVE_FLAG_MASK,
memory_order_acquire, memory_order_relaxed ) )
{
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
return;
}
else;
else
{
if( (cmp & WAITING_EXCLUSIVE_MASK) == WAITING_EXCLUSIVE_MASK )
throw wbsm_exception( wbsm_exception::WAITING_EXCLUSIVE_COUNTER_SATURATED,
"wbsm-lock waiting-exclusive-counter saturated" );
if( m_atomic.compare_exchange_weak( cmp, cmp +
WAITING_EXCLUSIVE_VALUE, memory_order_relaxed, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseExclusiveSem.acquire(); } );
m_exclusiveThreadId = threadId;
m_exclusiveRecursionCount = 0;
return;
}
}
}
}
bool wbias_shared_mutex::try_lock_exclusive( thread_id const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_acquire );
return internal_spin_lock_exclusive( cmp, threadId );
}
// throws wbsm_exception if recursion-counter saturates
// throws wbsm_exception if waiting sharers counter saturates
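// returns true if the lock was taken shared, false if the calling thread
// already holds it exclusively and only the recursion counter was bumped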
bool wbias_shared_mutex::lock_preferred_shared( thread_id const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_acquire );
assert(check( cmp ));
if( (cmp & EXCLUSIVE_FLAG_MASK) && m_exclusiveThreadId == threadId )
{
if( m_exclusiveRecursionCount == numeric_limits<uint32_t>::max() )
throw wbsm_exception( wbsm_exception::RECURSION_COUNTER_SATURATED,
"wbsm-lock recursion-counter saturated" );
++m_exclusiveRecursionCount;
return false;
}
if( internal_spin_lock_shared( cmp ) )
return true;
internal_lock_shared( cmp );
return true;
}
void wbias_shared_mutex::unlock_exclusive( thread_id const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( memory_order_acquire );
assert(check( cmp ));
assert((cmp & EXCLUSIVE_FLAG_MASK) && m_exclusiveThreadId == threadId);
if( (cmp & EXCLUSIVE_FLAG_MASK) && m_exclusiveRecursionCount &&
m_exclusiveThreadId == threadId )
{
--m_exclusiveRecursionCount;
return;
}
m_exclusiveThreadId = thread_id();
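// writer-biased wakeup: hand the still-set exclusive flag to one waiting
// writer first; only if none waits, wake all waiting sharers at once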
for( ; ; )
{
assert(check( cmp ));
assert(cmp & EXCLUSIVE_FLAG_MASK);
if( (cmp & WAITING_EXCLUSIVE_MASK) != 0 )
if( m_atomic.compare_exchange_weak( cmp, cmp -
WAITING_EXCLUSIVE_VALUE, memory_order_release, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseExclusiveSem.release( 1 ); } );
return;
}
else
continue;
if( (cmp & WAITING_SHARERS_MASK) != 0 )
{
uint64_t waitingSharers = cmp & WAITING_SHARERS_MASK,
wakeups = waitingSharers >> WAITING_SHARERS_BASE;
if( m_atomic.compare_exchange_weak( cmp, (cmp & ~EXCLUSIVE_FLAG_MASK)
- waitingSharers + wakeups, memory_order_release, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseSharedSem.release(
(ptrdiff_t)wakeups ); } );
return;
}
else
continue;
}
if( m_atomic.compare_exchange_weak( cmp, 0, memory_order_release,
memory_order_relaxed ) )
return;
}
}
// may throw wbsm_exception (lock remains exclusive)
void wbias_shared_mutex::exclusive_to_shared( bool force, thread_id
const &threadId )
{
using namespace std;
uint64_t cmp = m_atomic.load( std::memory_order_relaxed );
assert((cmp & EXCLUSIVE_FLAG_MASK));
if( m_exclusiveThreadId != threadId )
throw wbsm_exception( wbsm_exception::INVALID_THREAD_ID,
"wbsm-lock, trying to switch from a foreign thread id into shared mode" );
if( m_exclusiveRecursionCount )
throw wbsm_exception( wbsm_exception::RECURSION_COUNTER_NOT_ZERO,
"wbsm-lock, trying to switch into shared mode with recursion count != 0" );
assert((cmp & EXCLUSIVE_FLAG_MASK) && m_exclusiveRecursionCount == 0);
if( m_maxSharedSpinCount > 0 )
{
int32_t maxSpin = (int32_t)m_sharedSpinCount * 2 + 10;
maxSpin = maxSpin <= m_maxSharedSpinCount ? maxSpin :
m_maxSharedSpinCount;
int32_t spinCount = 0;
bool setSpinCount = true;
do
{
assert(check( cmp ));
//uint64_t wakeups = ((cmp & WAITING_SHARERS_MASK) >> WAITING_SHARERS_BASE) + (cmp & SHARERS_MASK);
uint64_t wakeups = (cmp & WAITING_SHARERS_MASK) >> WAITING_SHARERS_BASE;
if( wakeups == SHARERS_MASK )
{
setSpinCount = false;
break;
}
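// without force the CAS may only succeed while no writer is waiting,
// so clear the waiting-exclusive bits from the expected value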
if( !force )
cmp &= ~WAITING_EXCLUSIVE_MASK;
if( m_atomic.compare_exchange_weak( cmp, (cmp & ~EXCLUSIVE_FLAG_MASK)
+ wakeups + SHARER_VALUE,
memory_order_release,
memory_order_relaxed ) )
{
if( wakeups )
exc_terminate_or_spin( [&]() { m_releaseSharedSem.release(
(ptrdiff_t)wakeups ); } );
m_sharedSpinCount += ((int16_t)spinCount - m_sharedSpinCount) / 8;
return;
}
cpu_pause();
} while( ++spinCount < maxSpin );
if( setSpinCount )
m_sharedSpinCount += ((int16_t)spinCount - m_sharedSpinCount) / 8;
}
thread_id recoverThreadId = m_exclusiveThreadId;
m_exclusiveThreadId = thread_id();
for( ; ; )
{
assert(check( cmp ));
uint64_t waitingSharers = cmp & WAITING_SHARERS_MASK,
wakeups = waitingSharers >> WAITING_SHARERS_BASE;
if( !(cmp & WAITING_EXCLUSIVE_MASK) || force )
{
if( wakeups == SHARERS_MASK )
{
m_exclusiveThreadId = recoverThreadId;
throw wbsm_exception( wbsm_exception::SHARER_COUNTER_SATURATED,
"wbsm-lock sharer-counter saturated" );
}
if( m_atomic.compare_exchange_weak( cmp, (cmp & ~EXCLUSIVE_FLAG_MASK)
- waitingSharers + wakeups + SHARER_VALUE, memory_order_release,
memory_order_relaxed ) )
{
if( wakeups )
exc_terminate_or_spin( [&]() { m_releaseSharedSem.release(
(ptrdiff_t)wakeups ); } );
return;
}
else
continue;
}
if( wakeups == SHARERS_MASK )
{
m_exclusiveThreadId = recoverThreadId;
throw wbsm_exception( wbsm_exception::SHARER_COUNTER_SATURATED,
"wbsm-lock sharer-counter saturated" );
}
if( m_atomic.compare_exchange_weak( cmp, cmp - WAITING_EXCLUSIVE_VALUE
+ WAITING_SHARERS_VALUE, memory_order_release, memory_order_relaxed ) )
{
exc_terminate_or_spin( [&]() { m_releaseExclusiveSem.release( 1 ); } );
exc_terminate_or_spin( [&]() { m_releaseSharedSem.acquire(); });
return;
}
}
}
inline
void wbias_shared_mutex::cpu_pause()
{
#if defined(_MSC_VER) || (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)))
_mm_pause();
#else
#error "need platform-specific pause-instruction"
#endif
}
std::int16_t wbias_shared_mutex::max_exclusive_spin_count( std::int16_t
max )
{
std::swap( max, m_maxExclusiveSpinCount );
return max;
}
std::int16_t wbias_shared_mutex::max_shared_spin_count( std::int16_t max )
{
std::swap( max, m_maxSharedSpinCount );
return max;
}
void wbsm_lock::set_state( std::uint8_t state, wbias_shared_mutex &mtx )
{
xassert(state == wbsm_lock::UNLOCKED || state == wbsm_lock::SHARED ||
state == wbsm_lock::EXCLUSIVE);
switch( state )
{
case wbsm_lock::UNLOCKED:
unlock();
break;
case wbsm_lock::SHARED:
lock_shared( mtx );
break;
case wbsm_lock::EXCLUSIVE:
lock_exclusive( mtx );
break;
}
}
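And a minimal usage sketch; the payload and thread counts are illustrative only. The wbsm_lock guard takes the lock shared by default and exclusive when passed false, and unlocks in its destructor:
// -------------------- usage sketch (illustrative)
#include <thread>
#include <vector>
static wbias_shared_mutex mtx;
static int payload = 0; // illustrative shared data
void reader()
{
for( int i = 0; i != 1000; ++i )
{
wbsm_lock lock( mtx ); // shared by default
int snapshot = payload; // read under shared lock
(void)snapshot;
}
}
void writer()
{
for( int i = 0; i != 1000; ++i )
{
wbsm_lock lock( mtx, false ); // exclusive
++payload; // write under exclusive lock
}
}
int main()
{
std::vector<std::jthread> threads;
for( int i = 0; i != 4; ++i )
threads.emplace_back( reader );
threads.emplace_back( writer );
}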