On Saturday, October 22, 2016 at 4:22:44 PM UTC-4, Mr Flibble wrote:
>
> Umm ... yes.
>
> boost::fast_pool_allocator can offer better performance over
> std::allocator when it uses a mutex (default)
I don't think so. I ran the test below (Windows 10, vc140), and the results were:
(std_allocator) 3356 milliseconds
(boost_allocator) 5949 milliseconds
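If anything, the mutex should only hurt in a single-threaded test like this
one. Going by the boost/pool docs, fast_pool_allocator takes a Mutex template
parameter (after the UserAllocator), so you can switch the locking off with
the null_mutex policy. Untested sketch, reusing the book struct from the
program below:

#include <boost/pool/pool_alloc.hpp>

// Same allocator, but with locking disabled. The second template argument is
// boost's default UserAllocator; the third replaces the default mutex with
// the null_mutex policy from the boost/pool docs.
using no_mutex_allocator = boost::fast_pool_allocator<
    std::pair<const size_t, book>,
    boost::default_user_allocator_new_delete,
    boost::details::pool::null_mutex>;

using map3 = std::map<size_t, book, std::less<size_t>, no_mutex_allocator>;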
As an aside, from the boost docs, "The underlying singleton_pool used by the
[boost::fast_pool_allocator] allocator constructs a pool instance that is never
freed."
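Presumably, if you want that memory back you have to call into the underlying
singleton_pool yourself. Untested sketch; the tag and the requested size have
to match the allocator's own instantiation, and this assumes the allocator's
default UserAllocator/Mutex/NextSize parameters:

#include <boost/pool/pool_alloc.hpp>
#include <boost/pool/singleton_pool.hpp>

// The pool used behind the scenes by
// boost::fast_pool_allocator<std::pair<const size_t, book>> when it is
// instantiated with its default template parameters.
using book_pool = boost::singleton_pool<
    boost::fast_pool_allocator_tag,
    sizeof(std::pair<const size_t, book>)>;

void free_book_pool()
{
    // purge_memory() frees every block the pool owns, in-use or not, so only
    // call it once nothing allocated from the pool is still alive.
    book_pool::purge_memory();
}

Anyway, the test: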
#include <boost/pool/pool_alloc.hpp>
#include <iostream>
#include <map>
#include <string>
#include <chrono>

using std::chrono::high_resolution_clock;

struct book
{
    std::string author;
    std::string title;
    double price;
};

// std::map's value_type is std::pair<const Key, T>, so both allocators are
// instantiated with a const key.
using boost_allocator = boost::fast_pool_allocator<std::pair<const size_t, book>>;
using std_allocator = std::allocator<std::pair<const size_t, book>>;

using map1 = std::map<size_t, book, std::less<size_t>, std_allocator>;
using map2 = std::map<size_t, book, std::less<size_t>, boost_allocator>;

int main()
{
    book book1{ "Haruki Murakami", "Kafka on the Shore", 25.17 };
    size_t count = 10000000;

    {
        // Time count insertions with the default std::allocator.
        map1 booklist;
        auto start = high_resolution_clock::now();
        for (size_t i = 0; i < count; ++i)
        {
            booklist.insert(std::make_pair(i, book1));
        }
        auto end = high_resolution_clock::now();
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
        std::cout << "(std_allocator) " << elapsed << " milliseconds" << std::endl;
    }

    {
        // Same workload with boost::fast_pool_allocator.
        map2 booklist;
        auto start = high_resolution_clock::now();
        for (size_t i = 0; i < count; ++i)
        {
            booklist.insert(std::make_pair(i, book1));
        }
        auto end = high_resolution_clock::now();
        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
        std::cout << "(boost_allocator) " << elapsed << " milliseconds" << std::endl;
    }

    return 0;
}