Here's what you want: a ring-buffered logger. First the ring-buffer class (it lacks iterators, hence it's not as efficient as it could be):
#pragma once
#include <ctime>
#include <vector>
#include <stdexcept>
#include <new>
#include <utility>
#include <cstring>

template<typename T>
struct ring_buffer
{
    ring_buffer( std::size_t size );
    T &operator []( std::size_t idx );
    T &front();
    void pop_front();
    template<typename ... Args>
    T &emplace_back( Args &&...args );
    bool full();
    std::size_t size();
private:
    using vec_t = std::vector<T>;
    using it_t = typename vec_t::iterator;
    vec_t m_ring;
    // m_read == m_write means either empty or full; m_full disambiguates
    it_t m_read, m_write;
    bool m_full;
};
template<typename T>
ring_buffer<T>::ring_buffer( std::size_t size ) :
    m_ring( size ),
    m_read( m_ring.begin() ),
    m_write( m_ring.begin() ),
    m_full( false )
{
    if( !size )
        throw std::invalid_argument( "ring-buffer size must be != 0" );
}
template<typename T>
T &ring_buffer<T>::operator []( std::size_t idx )
{
    using namespace std;
    if( !m_full && m_read == m_write )
        throw logic_error( "ring-buffer empty" );
    // contiguous case: the live elements are [m_read, m_write)
    if( m_write > m_read )
    {
        if( idx < (size_t)(m_write - m_read) )
            return m_read[idx];
        throw invalid_argument( "ring-buffer index too large" );
    }
    // wrapped case: the live elements are [m_read, end) followed by [begin, m_write)
    if( idx < (size_t)(m_ring.end() - m_read) )
        return m_read[idx];
    if( (idx -= m_ring.end() - m_read) >= (size_t)(m_write - m_ring.begin()) )
        throw invalid_argument( "ring-buffer index too large" );
    return m_ring[idx];
}
template<typename T>
T &ring_buffer<T>::front()
{
    // logic_error for consistency with operator[] and pop_front()
    if( !m_full && m_read == m_write )
        throw std::logic_error( "ring-buffer empty" );
    return *m_read;
}
template<typename T>
void ring_buffer<T>::pop_front()
{
    if( !m_full && m_read == m_write )
        throw std::logic_error( "ring-buffer empty" );
    if( ++m_read == m_ring.end() )
        m_read = m_ring.begin();
    // after removing an element the buffer can't be full anymore;
    // m_read == m_write now means empty, not full
    m_full = false;
}
template<typename T>
template<typename ... Args>
T &ring_buffer<T>::emplace_back( Args &&...args )
{
    using namespace std;
    if( m_full )
        throw logic_error( "ring buffer full" );
    // destroy the stale element and construct the new one in place;
    // T's constructor must not throw here, otherwise the vector would
    // later destroy an already-dead object
    T &t = *m_write;
    t.~T();
    new( &t )T( forward<Args>( args ) ... );
    if( ++m_write == m_ring.end() )
        m_write = m_ring.begin();
    m_full = m_write == m_read;
    return t;
}
template<typename T>
std::size_t ring_buffer<T>::size()
{
    if( m_full )
        return m_ring.size();
    // m_read == m_write means empty here, so <= also yields the correct 0
    if( m_read <= m_write )
        return m_write - m_read;
    return (m_ring.end() - m_read) + (m_write - m_ring.begin());
}
template<typename T>
inline
bool ring_buffer<T>::full()
{
    return m_full;
}
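Before the logger itself, here's a minimal standalone sketch of how the class is meant to be used; the values are made up for illustration:

#include "ring_buffer.h"
#include <iostream>

int main()
{
    ring_buffer<int> rb( 3 );
    rb.emplace_back( 1 );
    rb.emplace_back( 2 );
    rb.emplace_back( 3 );
    // the buffer is full now; drop the oldest element to make room
    if( rb.full() )
        rb.pop_front();
    rb.emplace_back( 4 );
    // index 0 is the oldest element, so this prints 2 3 4
    for( std::size_t i = 0; i != rb.size(); ++i )
        std::cout << rb[i] << "\n";
}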
Now the circular log main file:
#if defined(_MSC_VER)
    #define _CRT_SECURE_NO_WARNINGS
#endif
#include "ring_buffer.h"
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <chrono>
#include <random>
#include <algorithm>
#include <vector>
#include <ctime>
#include <cstring>
#include <cstdint>
using namespace std;
using namespace chrono;
using namespace chrono_literals;
int main( int argc, char **argv )
{
    struct log_entry_t
    {
        time_t timestamp;
        char msg[100];
        log_entry_t() :
            timestamp( -1 )
        {
            msg[0] = 0;
        }
        log_entry_t( time_t timestamp, char const *msg ) :
            timestamp( timestamp )
        {
            strncpy( this->msg, msg, 100 );
            this->msg[99] = 0;  // strncpy doesn't NUL-terminate a too-long source
        }
        log_entry_t( log_entry_t const &le ) :
            timestamp( le.timestamp )
        {
            // le.msg is always NUL-terminated, see above
            strncpy( this->msg, le.msg, 100 );
        }
    };
    using ring_t = ring_buffer<log_entry_t>;
    mutex mtx;
    size_t const MAX_LOG_SIZE = 1'000'000;
    ring_t logRing( MAX_LOG_SIZE );
    condition_variable cvReady, cvRun, cvStop;
    unsigned ready = 0;
    bool run = false,
        stop = false;
    atomic_int64_t nsCreate = 0,
        nsInsert = 0;
    atomic_int64_t nEnqueued = 0;
    auto waitStart = [&]
    {
        unique_lock<mutex> ul( mtx );
        ++ready;
        cvReady.notify_one();
        cvRun.wait( ul, [&]() { return run; } );
    };
    // sleeps for approximately ms while holding ul, but wakes up early when
    // stop is signalled; returns false if the thread should terminate
    auto wait = [&]( unique_lock<mutex> &ul, milliseconds ms ) -> bool
    {
        nanoseconds waited = nanoseconds( 0 );
        for( ; ; )
        {
            milliseconds rndWaited = duration_cast<milliseconds>( waited + 500us );
            if( rndWaited >= ms )
                return !stop;
            time_point<high_resolution_clock> waitBegin =
                high_resolution_clock::now();
            cv_status cvs = cvStop.wait_for( ul, ms - rndWaited );
            if( stop )
                return false;
            if( cvs == cv_status::timeout )
                return true;
            // spurious wakeup: account for the time already waited and retry
            waited += duration_cast<nanoseconds>( high_resolution_clock::now() -
                waitBegin );
        }
    };
    auto logThread = [&]()
    {
        waitStart();
        mt19937_64 mt( (random_device())() );
        uniform_int_distribution<int> uidMsGap( 10, 50 );
        // at most 99 characters so the message always fits with its terminator
        uniform_int_distribution<size_t> uidMsgLength( 10, 99 );
        uniform_int_distribution<int> uidMsgChars( 'a', 'z' );
        int64_t nsCreateT = 0,
            nsInsertT = 0;
        for( ; ; )
        {
            unique_lock<mutex> ul( mtx );
            if( !wait( ul, milliseconds( uidMsGap( mt ) ) ) )
            {
                nsCreate.fetch_add( nsCreateT );
                nsInsert.fetch_add( nsInsertT );
                return;
            }
            ul.unlock();
            // build the log entry outside the lock
            time_point<high_resolution_clock> begin = high_resolution_clock::now();
            time_t timeStamp = time( nullptr );
            if( timeStamp == (time_t)-1 )
                continue;
            char msg[100];
            size_t msgLength = uidMsgLength( mt );
            msg[msgLength] = 0;
            for( ; msgLength--; msg[msgLength] = (char)uidMsgChars( mt ) );
            nsCreateT += duration_cast<nanoseconds>( high_resolution_clock::now()
                - begin ).count();
            begin = high_resolution_clock::now();
            // insert under the lock, dropping the oldest entry when the ring is full
            ul.lock();
            if( logRing.full() )
                logRing.pop_front();
            logRing.emplace_back( timeStamp, msg );
            ul.unlock();
            nsInsertT += duration_cast<nanoseconds>( high_resolution_clock::now()
                - begin ).count();
            ++nEnqueued;
        }
    };
    auto printThread = [&]()
    {
        waitStart();
        vector<log_entry_t> cpyLog;
        for( ; ; )
        {
            unique_lock<mutex> ul( mtx );
            if( !wait( ul, 1s ) )
                return;
            // copy the whole log under the lock, sort it outside the lock;
            // the vector is reused across iterations to keep its capacity
            size_t logSize = logRing.size();
            cpyLog.resize( logSize );
            for( size_t i = 0; i != logSize; ++i )
                cpyLog[i] = logRing[i];
            ul.unlock();
            sort( cpyLog.begin(), cpyLog.end(),
                []( log_entry_t const &left, log_entry_t const &right )
                {
                    return left.timestamp < right.timestamp;
                } );
        }
    };
    unsigned const MAX_THREADS = 10'000;
    vector<thread> loggers;
    thread printer;
    for( unsigned t = MAX_THREADS; t--; )
        loggers.emplace_back( logThread );
    if( argc > 1 )
        printer = thread( printThread );
    for( ; ; )
    {
        unique_lock<mutex> ul( mtx );
        if( ready == MAX_THREADS + (argc > 1) )
            break;
        cvReady.wait( ul );
    }
    // set the flags under the mutex: the waiters read them under the same
    // mutex, so an unsynchronized write would be a data race
    {
        lock_guard<mutex> lg( mtx );
        run = true;
    }
    cvRun.notify_all();
    unsigned const SECS = 10;
    this_thread::sleep_for( seconds( SECS ) );
    {
        lock_guard<mutex> lg( mtx );
        stop = true;
    }
    cvStop.notify_all();
    for( thread &thr : loggers )
        thr.join();
    if( argc > 1 )
        printer.join();
    cout << (double)nsInsert / nsCreate / (1.0e9 / 100.0) << "%" << endl;
    cout << (double)nEnqueued / (double)SECS << endl;
}
This code runs 10,000 logging threads and, if a command-line parameter is given, one print thread that takes a copy of the log once a second and sorts it. The elements may not end up strictly sorted because the timestamp is taken while the queue is unlocked, so log-entry creation in different threads can race with the insertion order.
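If strict ordering is needed, a common fix is to tag each entry with a monotonically increasing sequence number drawn from an atomic counter and to sort by that instead of the wall-clock timestamp; fetch_add is atomic, so it is race-free even when taken outside the lock. Here's a minimal sketch of the idea; the names seq_log_entry_t, logSeq, nextSeq and sortLog are made up for illustration and aren't part of the code above:

#include <atomic>
#include <cstdint>
#include <ctime>
#include <vector>
#include <algorithm>
using namespace std;

// hypothetical entry type: the sequence number is the ordering key,
// the wall-clock timestamp is kept for display only
struct seq_log_entry_t
{
    uint64_t seq;
    time_t timestamp;
    char msg[100];
};

atomic_uint64_t logSeq( 0 );

// each logging thread tags its entry while building it; two threads can
// never obtain the same sequence number
inline uint64_t nextSeq()
{
    return logSeq.fetch_add( 1, memory_order_relaxed );
}

// the print thread sorts by seq, which is a strict total order
inline void sortLog( vector<seq_log_entry_t> &cpyLog )
{
    sort( cpyLog.begin(), cpyLog.end(),
        []( seq_log_entry_t const &left, seq_log_entry_t const &right )
        {
            return left.seq < right.seq;
        } );
}

Note that seq reflects when an entry was created, not when it was inserted into the ring, but it gives the sorted output a stable, strict order regardless of how the threads interleave.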