Google Groups no longer supports new Usenet posts or subscriptions. Historical content remains viewable.
Dismiss

Sieve of Eratosthenes optimized to the max

682 views
Skip to first unread message

Bonita Montero

unread,
Dec 10, 2023, 4:47:13 AM12/10/23
to
I've parallelized my sieve of Eratosthenes to all cores. The parallel
threads do all the punching of the non-prime multiples beyond sqrt( max
). I found that the algorithm is limited by the memory bandwidth since
the bit-range between sqrt( max ) and max is too large to fit inside
the cache. So I partitioned each thread's work into a number of bit-ranges
that fits inside the L2-cache. With that I got a speedup of factor 28
when calculating about the first 210 million primes, i.e. all primes
<= 2 ^ 32.
On my Zen4 16 core PC under Windows with clang-cl 16.0.5 producing the
primes without any file output is only 0.28s. On my Linux-PC, a Zen2 64
core CPU producing the same number of primes is about 0.09s.
The file output is done with my own buffering. With that writing the
mentioned prime number range results in a 2.2GB file which is written
in about two seconds with a PCIe 4.0 SSD.
The first parameter is the highest prime candidate to be generated,
it can be prefixed with 0x for hex values. The second number is the
name of the output file; it can be "" to suppress file output. The
third parameter is the number of punching threads; if it is left the
number of threads defaults to the number of threads reported by the
runtime.

#include <iostream>
#include <cstdlib>
#include <charconv>
#include <cstring>
#include <vector>
#include <algorithm>
#include <cmath>
#include <bit>
#include <fstream>
#include <string_view>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <utility>
#include <semaphore>
#include <atomic>
#include <new>
#include <span>
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) || defined(__clang__)
#include <cpuid.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 26495)
#endif

using namespace std;

#if defined(__cpp_lib_hardware_interference_size)
constexpr ptrdiff_t CL_SIZE = hardware_destructive_interference_size;
#else
constexpr ptrdiff_t CL_SIZE = 64;
#endif

size_t getL2Size();

int main( int argc, char **argv )
{
// Usage: <maxCandidate> [outFile|""] [nPunchThreads]
// argv[1] may carry a 0x/0X prefix for hex input.
if( argc < 2 )
return EXIT_FAILURE;
// Parse str as decimal or hex into value; succeeds only if the whole
// string is consumed.
auto parse = []( char const *str, bool hex, auto &value )
{
from_chars_result fcr = from_chars( str, str + strlen( str ), value,
!hex ? 10 : 16 );
return fcr.ec == errc() && !*fcr.ptr;
};
char const *sizeStr = argv[1];
bool hex = sizeStr[0] == '0' && (sizeStr[1] == 'x' || sizeStr[1] == 'X');
// Skip the "0x" prefix when present (2 * false == 0).
sizeStr += 2 * hex;
size_t end;
if( !parse( sizeStr, hex, end ) )
return EXIT_FAILURE;
// Reject limits with the top bit set (signed arithmetic below relies on
// non-negative values); the ++ makes `end` an exclusive bound.
if( (ptrdiff_t)end++ < 0 )
throw bad_alloc();
unsigned hc;
// Punch-thread count: argv[3], defaulting to the hardware concurrency.
if( argc < 4 || !parse( argv[3], false, hc ) )
hc = jthread::hardware_concurrency();
if( !hc )
return EXIT_FAILURE;
using word_t = size_t;
constexpr size_t
BITS = sizeof(word_t) * 8,
CL_BITS = CL_SIZE * 8;
// One cache line of sieve words; "ndi" = no default init, so the vector
// below doesn't value-initialize the large allocation before we fill it.
union alignas(CL_SIZE) ndi_words_cl { word_t words[CL_SIZE /
sizeof(word_t)]; ndi_words_cl() {} };
size_t nCls = (end + CL_BITS - 1) / CL_BITS; // NOTE(review): unused local
vector<ndi_words_cl> sieveCachelines( (end + CL_BITS - 1) / CL_BITS );
size_t nWords = (end + BITS - 1) / BITS;
// Flat word view of the bitmap; bit n set <=> n is still a prime candidate.
span<word_t> sieve( &sieveCachelines[0].words[0], nWords );
auto fill = sieve.begin();
// Pre-clear the even bits: 0xAA.. keeps only odd positions set. The first
// word ends in ..C so bits 0 and 1 (numbers 0 and 1) are clear while bit 2
// (the prime 2) stays set. The casts truncate correctly for narrower word_t.
*fill++ = (word_t)0xAAAAAAAAAAAAAAACu;
if( fill != sieve.end() )
for_each( fill, sieve.end(), []( word_t &w ) { w =
(word_t)0xAAAAAAAAAAAAAAAAu; } );
size_t sqrt = (ptrdiff_t)ceil( ::sqrt( (ptrdiff_t)end ) );
// Clear bits start, start+prime, ... below end. The unused first parameter
// merely tags the two instantiations (sequential vs. threaded phase).
auto punch = [&]( auto, size_t start, size_t end, size_t prime )
{
if( start >= end )
return;
size_t multiple = start;
// Strides of at least a word touch each word no more than once.
if( prime >= BITS ) [[likely]]
do
sieve[multiple / BITS] &= rotl( (word_t)-2, multiple % BITS );
while( (multiple += prime) < end );
else
{
// Small strides hit the same word repeatedly: combine all clear-masks
// in a register and store once. A rotated mask comparing smaller than
// its predecessor signals wrap-around into the next word.
auto pSieve = sieve.begin() + multiple / BITS;
do
{
word_t
word = *pSieve,
mask = rotl( (word_t)-2, multiple % BITS ),
oldMask;
do
word &= mask,
oldMask = mask,
mask = rotl( mask, prime % BITS ),
multiple += prime;
while( mask < oldMask );
*pSieve++ = word;
} while( multiple < end );
}
};
// Sequential base phase: finish the sieve below sqrt so scan() can later
// enumerate the base primes for the parallel phase.
for( size_t prime = 3; prime < sqrt; ++prime )
{
// Find the next set bit at or after `prime`.
// NOTE(review): pSieve is never advanced when a word is exhausted, so the
// next iteration re-reads the same word after rounding `prime` up to the
// following word boundary — verify this cannot mis-select a base prime.
for( auto pSieve = sieve.begin() + prime / BITS; ; )
{
if( word_t w = *pSieve >> prime % BITS; w ) [[likely]]
{
prime += countr_zero( w );
break;
}
prime = prime + BITS & -(ptrdiff_t)BITS;
if( prime >= sqrt )
break;
}
if( prime >= sqrt ) [[unlikely]]
break;
// Multiples below prime*prime were already punched by smaller primes.
punch( false_type(), prime * prime, sqrt, prime );
}
// Enumerate every set bit in [start, end), invoking fn( bitIndex ).
auto scan = [&]( size_t start, size_t end, auto fn )
{
for( size_t prime = start; prime < end; )
{
word_t word = sieve[prime / BITS] >> prime % BITS;
bool hasBits = word;
for( unsigned gap; word; word >>= gap, word >>= 1 )
{
gap = countr_zero( word );
if( (prime += gap) >= end ) [[unlikely]]
return;
fn( prime );
if( ++prime >= end ) [[unlikely]]
return;
}
// Advance to the next word boundary.
prime = (prime + BITS - hasBits) & -(ptrdiff_t)BITS;
}
};
// Partition [sqrt, end) into one range per thread, aligned down to
// cache-line bit boundaries so threads rarely share cache lines.
size_t bitsPerPartition = (end - sqrt) / hc;
using range_t = pair<size_t, size_t>;
vector<pair<size_t, size_t>> ranges;
ranges.reserve( hc );
for( size_t t = hc, rEnd = end, rStart; t--; rEnd = rStart )
{
rStart = sqrt + t * bitsPerPartition;
size_t rClStart = rStart & -(CL_SIZE * 8);
rStart = rClStart >= sqrt ? rClStart : sqrt;
if( rStart >= rEnd )
continue;
ranges.emplace_back( rStart, rEnd );
}
vector<jthread> threads;
// NOTE(review): underflows if ranges is empty (very small limits) — verify.
threads.reserve( ranges.size() - 1 );
static auto dbl = []( ptrdiff_t x ) { return (double)x; };
// Slice size per thread: about a third of the L2 cache, in bits, so the
// working set stays cache-resident.
double maxPartitionBits = dbl( getL2Size() ) * 8 / 3;
for( range_t const &range : ranges )
{
// Punch all base primes into one thread's range, processed top-down in
// L2-cache-sized slices.
auto rangePuncher = [&]( size_t rStart, size_t rEnd )
{
double nBits = dbl( rEnd - rStart );
ptrdiff_t nPartitions = (ptrdiff_t)ceil( nBits / maxPartitionBits );
double bitsPerPartition = nBits / dbl( nPartitions );
for( ptrdiff_t p = nPartitions, pEnd = rEnd, pStart; p--; pEnd = pStart )
{
pStart = rStart + (ptrdiff_t)((double)p * bitsPerPartition);
pStart &= -(CL_SIZE * 8);
// Re-enumerate the base primes for every slice; the sieve below sqrt
// is final at this point.
scan( 3, sqrt,
[&]( size_t prime )
{
// Start at the first multiple of prime at or above pStart.
punch( true_type(), (pStart + prime - 1) / prime * prime, pEnd,
prime );
} );
}
};
// The last range is processed on the main thread instead of spawning.
if( &range != &ranges.back() )
threads.emplace_back( rangePuncher, range.first, range.second );
else
rangePuncher( range.first, range.second );
}
// jthread joins in its destructor; clearing the vector waits for all work.
threads.resize( 0 );
// An empty file-name argument suppresses output entirely.
if( argc >= 3 && !*argv[2] )
return EXIT_SUCCESS;
ofstream ofs;
ofs.exceptions( ofstream::failbit | ofstream::badbit );
ofs.open( argc >= 3 ? argv[2] : "primes.txt", ofstream::binary |
ofstream::trunc );
// Manual output buffering: flush once BUF_SIZE is filled; AHEAD leaves
// room for one more number plus its newline beyond the flush mark.
constexpr size_t
BUF_SIZE = 0x100000,
AHEAD = 32;
union ndi_char { char c; ndi_char() {} };
vector<ndi_char> printBuf( BUF_SIZE + AHEAD - 1 );
auto
bufBegin = printBuf.begin(),
bufEnd = bufBegin,
bufFlush = bufBegin + BUF_SIZE;
auto print = [&]() { ofs << string_view( &bufBegin->c, &to_address(
bufEnd )->c ); };
scan( 2, end,
[&]( size_t prime )
{
auto [ptr, ec] = to_chars( &bufEnd->c, &bufEnd[AHEAD - 1].c, prime );
if( ec != errc() ) [[unlikely]]
throw system_error( (int)ec, generic_category(), "converson failed" );
bufEnd = ptr - &bufBegin->c + bufBegin; // pointer to iterator - NOP
bufEnd++->c = '\n';
if( bufEnd >= bufFlush ) [[unlikely]]
print(), bufEnd = bufBegin;
} );
// Write out the remainder of the buffer.
print();
}

// Returns the CPU's L2 cache size in bytes, queried via CPUID leaf
// 0x80000006 (ECX bits 31:16 = size in KB). Falls back to 512 KiB when
// the extended leaf is not supported by the processor.
size_t getL2Size()
{
int regs[4];
// Execute CPUID for `code`; all four registers land in the captured regs[].
auto cpuId = [&]( unsigned code )
{
#if defined(_MSC_VER)
__cpuid( regs, code );
#elif defined(__GNUC__) || defined(__clang__)
__cpuid(code, regs[0], regs[1], regs[2], regs[3]);
#endif
return regs[0];
};
// Leaf 0x80000000 reports the highest supported extended leaf.
if( (unsigned)cpuId( 0x80000000u ) < 0x80000006u )
return 512ull * 1024;
cpuId( 0x80000006u );
// ECX[31:16] holds the L2 size in KB.
return ((unsigned)regs[2] >> 16) * 1024;
}

Vir Campestris

unread,
Dec 10, 2023, 4:48:28 PM12/10/23
to
On 10/12/2023 09:46, Bonita Montero wrote:
> I've parallelized my sieve of Erastosthenes to all cores. The parallel
> threads do al the punching of the non-prime multiplex beyond sqrt( max
> ). I found that the algorithm is limited by the memory bandwith since
> the bit-range between sqrt( max ) and max is to large to fit inside
> the cache. So I partitioned the each thread to a number of bit-ranges
> that fits inside the L2-cache. With that I got a speedup of factor 28
> when calculating about the first 210 million primes, i.e. all primes
> <= 2 ^ 32.
> On my Zen4 16 core PC under Windows with clang-cl 16.0.5 producing the
> primes without any file output is only 0.28s. On my Linux-PC, a Zen2 64
> core CPU producing the same number of primes is about 0.09s.
> The file output is done with my own buffering. With that writing the
> mentioned prime number range results in a 2.2GB file which is written
> in about two seconds with a PCIe 4.0 SSD.
> The first parameter is the highest prime candidate to be generated,
> it can be prefixed with 0x for hex values. The second number is the
> name of the output file; it can be "" to suppress file output. The
> third parameter is the number of punching threads; if it is left the
> number of threads defaults to the number of threads reported by the
> runtime.
>
<snip code>

Just so happens I've been thinking about this. I find your code
impenetrable - there are no comments at all. But two things occurred to
me quite quickly:
- You don't need to store the odd numbers
- You only need a bitmask to save the sieve.

I think you're doing the latter, though it's not obvious. I think you're
storing bits in an array of size_t, and that will be what
0xAAAAAAAAAAAAAAACu and 0xAAAAAAAAAAAAAAAAu are about. However if I am
reading that right you've assumed that unsigned is the same size as
size_t, and that the size is 64 bits.

Andy

Bonita Montero

unread,
Dec 10, 2023, 10:15:59 PM12/10/23
to
Am 10.12.2023 um 22:48 schrieb Vir Campestris:

> - You don't need to store the odd numbers

All primes except 2 are odd.

> - You only need a bitmask to save the sieve.

I'm using the "scan"-lambda which searches through the sieve
as fast as possible with countr_zero, which maps to a single CPU
instruction on modern processors.

> ... reading that right you've assumed that unsigned is the same
> size as size_t, and that the size is 64 bits.

I've got a type word_t that is a size_t, but it can also
be a uint8_t to test what's the performance difference.

Vir Campestris

unread,
Dec 11, 2023, 12:12:14 PM12/11/23
to
On 11/12/2023 03:15, Bonita Montero wrote:
> Am 10.12.2023 um 22:48 schrieb Vir Campestris:
>
>> - You don't need to store the odd numbers
>
> All primes except 2 are odd.
>
I had a really bad night, and I was half asleep yesterday.

Because all primes except 2 are odd you don't need to store the _even_
numbers, which is what I meant to say. Or else you only need to store
the odd ones.

>> - You only need a bitmask to save the sieve.
>
> I'm using the "scan"-lambda which does serarch through the sieve
> as fast as possible with countr_zero, which maps to a single CPU
> -instruction on modern processors.
>
>> ... reading that right you've assumed that unsigned is the same
>> size as size_t, and that the size is 64 bits.
>
> I've got a type word_t that is a size_t, but it can also
> be a uint8_t to test what's the performance difference.
>

Your constants are 64 bit hexadecimal numbers (from counting the digits)
and are unsigned (the u suffix) but there's no guarantee that size_t
will be the same size as unsigned, nor that it will be 64 bit.

Andy

Bonita Montero

unread,
Dec 11, 2023, 12:19:30 PM12/11/23
to
Am 11.12.2023 um 18:12 schrieb Vir Campestris:

> Because all primes except 2 are odd you don't need to store the _even_
> numbers, which is what I meant to say. Or else you only need to store
> the odd ones.

I'll try that the next days; but I don't expect a significant change.

> Your constants are 64 bit hexadecimal numbers (from counting the digits)
> and are unsigned (the u suffix) but there's no guarantee that size_t
> will be the same size as unsigned, nor that it will be 64 bit.

The constants are wide enough for 64 bit size_t-s but it won't make
a functional difference if you define word_t as a uint8_t. With an
uint8_t word_t the code runs about 1/6-th slower; that's less than
I expected.

Vir Campestris

unread,
Dec 13, 2023, 10:16:31 AM12/13/23
to
On 11/12/2023 17:19, Bonita Montero wrote:
> Am 11.12.2023 um 18:12 schrieb Vir Campestris:
>
>> Because all primes except 2 are odd you don't need to store the _even_
>> numbers, which is what I meant to say. Or else you only need to store
>> the odd ones.
>
> I'll try that the next days; but I don't expect a significant change.
>
Well, I went away and tried it. I didn't write anything nearly as
sophisticated as yours - I didn't worry about threads and caching. The
code follows. You'll have to unwrap it in a few places.

It occurred to me I could go further - not just optimise it out to store
odd numbers only, but also miss out the ones divisible by 3, or other
higher primes. The results were:
- Storing all numbers: 9.494 seconds to run the sieve
- Storing odd numbers only: 4.455. That's over twice as fast.
- Excluding also the ones divisible by 3: 3.468. That's an improvement,
but not as much as I expected. Perhaps it's got too complicated. I don't
have a high powered PC.

>> Your constants are 64 bit hexadecimal numbers (from counting the
>> digits) and are unsigned (the u suffix) but there's no guarantee that
>> size_t will be the same size as unsigned, nor that it will be 64 bit.
>
> The constants are wide enough for 64 bit size_t-s but it won't make
> a functional difference if you define word_t as a uint8_t. With an
> uint8_t word_t the code runs about 1/6-th slower; that's less than
> I expected.
>
Most likely what you are seeing there is that you've gone from being
memory bound to being compute bound. The CPU cache will make sure the
main memory accesses are 128 bits (or whatever your bus width is)

Andy
--
#include <cassert>
#include <chrono>
#include <cmath>
#include <iostream>
#include <thread>
#include <vector>

// Sieve limit: primality is computed for all numbers below this bound.
size_t PrimeCount = 1e8;

// this is a convenience class for timing.
// Convenience wall-clock timer built on std::chrono::steady_clock.
class stopwatch {
    using clock = std::chrono::steady_clock;
    clock::time_point mBegin, mEnd;
public:
    stopwatch() {}

    // Mark the beginning of a timed interval.
    void start()
    {
        mBegin = clock::now();
    }

    // Mark the end of a timed interval.
    void stop()
    {
        mEnd = clock::now();
    }

    // Seconds (millisecond resolution) between the last start() and stop().
    double delta() const
    {
        const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(mEnd - mBegin);
        return ms.count() / 1000.0;
    }
};

// the bitmask will be stores in a class that looks like this
// Abstract interface for a sieve bit-store: concrete subclasses decide how
// the primality flags are laid out in memory.
// (The posted code had word-wrapped comments whose continuation lines were
// not commented and so did not compile; they are rejoined here.)
class Primes
{
public:
    Primes(size_t size) {};
    virtual ~Primes() {};
    virtual void unprime(size_t value) = 0;   // marks a number as not prime
    virtual bool get(size_t value) const = 0; // gets how a number is marked
    virtual size_t size() const = 0;          // Size of the store
};

// Basic store. Stores all the primes in a vector<bool>
// Baseline store: one vector<bool> flag per candidate number, all
// initially marked prime.
class PrimesVB: public Primes
{
    std::vector<bool> mStore;
public:
    PrimesVB(size_t size) : Primes(size), mStore(size, true) {};

    // Clear the flag for value (mark it composite).
    virtual void unprime(size_t value) override
    {
        mStore[value] = false;
    }

    // True while value has not been punched out.
    virtual bool get(size_t value) const override
    {
        return mStore[value];
    }

    // Number of candidates held.
    virtual size_t size() const override
    {
        return mStore.size();
    }
};

// Halved store: only odd numbers get a backing slot (index = value / 2);
// even values are answered without touching the bitmap.
class Primes2: public PrimesVB
{
    size_t mSize;
public:
    Primes2(size_t size) : PrimesVB((size + 1) / 2), mSize(size) {};

    // Even values have no slot, so only odd ones are forwarded.
    virtual void unprime(size_t value) override
    {
        if ((value & 1) == 0)
            return;
        PrimesVB::unprime(value / 2);
    }

    // Values 0..2 always read as marked; other even values are composite.
    virtual bool get(size_t value) const override
    {
        if (value <= 2)
            return true;
        if ((value & 1) == 0)
            return false;
        return PrimesVB::get(value / 2);
    }

    // Logical size (the backing store holds only half as many bits).
    virtual size_t size() const override
    {
        return mSize;
    }
};

// Store skipping multiples of 2 and 3: only residues 1 and 5 (mod 6) get a
// backing slot, one byte-pair ("page") per block of six numbers.
// (The posted code had word-wrapped comments whose continuation lines were
// not commented and so did not compile; they are rejoined here. The line-0
// example list also read "7, 13, 18" — corrected to the prime 19 — and the
// inline mod-6 examples now match mPosMap.)
class Primes23: public PrimesVB
{
    // Size of the map. This is a lot bigger than the size of the bitmap.
    size_t mSize;

    // each "page" contains 2 "lines". A line is a prime we are storing.
    // this map is where we store each number, depending on its value modulo 6.
    // -1 is known not to be prime, and isn't stored.
    // we store 7, 13, 19 in "line" 0; 11, 17, 23 in "line" 1.
    // the others are either divisible by 2 or 3, and don't need to be stored.
    static constexpr int mPosMap[6] = {-1, 0, -1, -1, -1, 1};
public:
    Primes23(size_t size) : PrimesVB((size + 2) / 3), mSize(size) {};

    // Mark value composite; multiples of 2 and 3 have no slot and are ignored.
    virtual void unprime(size_t value) override
    {
        if (value <= 3) return;
        auto page = value / 6;
        auto line = mPosMap[value % 6];
        if (line >= 0)
        {
            PrimesVB::unprime(page * 2 + line);
        }
    }

    // Read the flag; 0..3 are always reported marked, and values divisible
    // by 2 or 3 are composite by construction.
    virtual bool get(size_t value) const override
    {
        if (value <= 3) return true;

        auto page = value / 6;          // page: 5->0 7->1 11->1 13->2
        auto line = mPosMap[value % 6]; // line: 5->1 7->0 11->1 13->0
        if (line >= 0)
        {
            return PrimesVB::get(page * 2 + line);
        }
        else
        {
            return false;
        }
    }

    // Logical size (the backing bitmap holds only a third as many bits).
    virtual size_t size() const override
    {
        return mSize;
    }
};


// Simple sieve of Eratosthenes function
// Classic sieve of Eratosthenes over an abstract bit-store: punch out the
// multiples of every prime up to sqrt(store.size()).
void sieve(Primes& store)
{
    const size_t storesize = store.size();
    const size_t maxfilter = std::ceil(std::sqrt(storesize));

    for (size_t prime = 2; prime < maxfilter; )
    {
        // Mark every multiple of the current prime as composite.
        for (size_t multiple = prime * 2; multiple < storesize; multiple += prime)
            store.unprime(multiple);

        // Advance to the next still-marked candidate.
        while (!store.get(++prime) && prime < maxfilter) {}
    }
}

// Benchmark function. Returns the constructed collection for validation.
// Benchmark function. Times construction and sieving of the given store
// type and prints both. Returns the constructed collection for validation.
// (The posted code had its " seconds." string literals broken across lines
// by word-wrapping, which did not compile; they are rejoined here.)
template <typename storetype> storetype test(const char* classname)
{
    stopwatch s;

    // Time allocation/initialisation of the bitmap.
    s.start();
    storetype store(PrimeCount);
    s.stop();
    std::cout << classname << " construct " << s.delta()
              << " seconds." << std::endl;

    // Time the sieve itself.
    s.start();
    sieve(store);
    s.stop();
    std::cout << classname << " sieve " << s.delta()
              << " seconds." << std::endl;

    return store;
}

// Benchmark all three store layouts, then cross-check that every layout
// reports the same primality for every candidate.
int main()
{
    auto vb = test<PrimesVB>("vector<bool>");
    auto p2 = test<Primes2>("Storing odds only");
    auto p23 = test<Primes23>("Not storing 2s and 3s");

    // double check they all match.
    // size_t avoids the signed/unsigned comparison with PrimeCount (and
    // overflow of int for limits beyond INT_MAX).
    for (size_t p = 1; p < PrimeCount; ++p)
    {
        assert(vb.get(p) == p2.get(p));
        assert(vb.get(p) == p23.get(p));
    }

    return 0;
}

Vir Campestris

unread,
Dec 13, 2023, 10:25:56 AM12/13/23
to
On 13/12/2023 15:16, Vir Campestris wrote:
> Well, I went away and tried it. I didn't write anything nearly as
> sophisticated as yours - I didn't worry about threads and caching. The
> code follows. You'll have to unwrap it in a few places.
>
> It occurred to me I could go further - not just optimise it out to store
> odd numbers only, but also miss out the ones divisible by 3, or other
> higher primes. The results were:
> - Storing all numbers: 9.494 seconds to run the sieve
> - Storing odd numbers only: 4.455. That's over twice as fast.
> - Excluding also the ones divisible by 3: 3.468. That's an improvement,
> but not as much as I expected. Perhaps it's got coo complicated. I don't
> have a high powered PC.

And no sooner had I posted that than I realised I'd got the optimisation
settings wrong.

With -Ofast fed to g++ the version that doesn't store multiples of 3 is
_slower_ than the odds only one!

Andy

Vir Campestris

unread,
Dec 14, 2023, 10:06:27 AM12/14/23
to
I tried a few more collections.

The slowest on my system is vector<bool> 12.165s for 1e9 primes.

Having a manual bitmask, and storing in uint64_t is a lot faster -
almost twice as fast (6.659).

A uint32_t is a little faster than that (6.306).

Optimising it to store odds only more than doubles the speed of
vector<bool> to 5.107 seconds.

That has less effect with uint64_t, taking it to 4.946 - which is the
best time I have.

Oddly storing odds only with uint32_t is not as fast as odds only with
uint64_t, although it is still faster than storing all the values (5.302).

I've optimised it to do less recursion, which has helped, but it still
isn't worth not storing the multiples of 3. I'll guess that's because
that code required a divide and mod by 6, whereas optimising out the
evens only requires shifts and masks.

Andy

red floyd

unread,
Dec 14, 2023, 11:20:31 AM12/14/23
to
On 12/13/2023 7:16 AM, Vir Campestris wrote:
> On 11/12/2023 17:19, Bonita Montero wrote:
>> Am 11.12.2023 um 18:12 schrieb Vir Campestris:
>>
>>> Because all primes except 2 are odd you don't need to store the
>>> _even_ numbers, which is what I meant to say. Or else you only need
>>> to store the odd ones.
>>
>> I'll try that the next days; but I don't expect a significant change.
>>
> Well, I went away and tried it. I didn't write anything nearly as
> sophisticated as yours - I didn't worry about threads and caching. The
> code follows. You'll have to unwrap it in a few places.
>
> It occurred to me I could go further - not just optimise it out to store
> odd numbers only, but also miss out the ones divisible by 3, or other
> higher primes. The results were:
> - Storing all numbers: 9.494 seconds to run the sieve
> - Storing odd numbers only: 4.455. That's over twice as fast.
> - Excluding also the ones divisible by 3: 3.468. That's an improvement,
> but not as much as I expected. Perhaps it's got coo complicated. I don't
> have a high powered PC.

Another way to attempt to optimize is that except for 2 and 3, all
primes are of the form 6n+1 or 6n-1.



Bonita Montero

unread,
Dec 20, 2023, 7:45:01 AM12/20/23
to
Am 11.12.2023 um 18:12 schrieb Vir Campestris:

> Because all primes except 2 are odd you don't need to store the _even_
> numbers, which is what I meant to say. Or else you only need to store
> the odd ones.

I changed the code this morning:

#include <cstdlib>
#include <charconv>
#include <cstring>
#include <vector>
#include <algorithm>
#include <cmath>
#include <bit>
#include <fstream>
#include <string_view>
#include <thread>
#include <utility>
#include <new>
#include <span>
#include <array>
#include <cassert>
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__) || defined(__clang__)
#include <cpuid.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 26495)
#endif

using namespace std;

#if defined(__cpp_lib_hardware_interference_size)
constexpr ptrdiff_t CL_SIZE = hardware_destructive_interference_size;
#else
constexpr ptrdiff_t CL_SIZE = 64;
#endif

size_t getL2Size();
bool smt();

// Odd-only variant: bit i of the sieve represents the odd number 2*i+1,
// halving the memory traffic compared to the first version.
int main( int argc, char **argv )
{
// Usage: <maxCandidate> [outFile|""] [nPunchThreads]
if( argc < 2 )
return EXIT_FAILURE;
// Parse a decimal or 0x/0X-prefixed hex number; succeeds only if the
// whole string is consumed.
auto parse = []( char const *str, auto &value )
{
bool hex = str[0] == '0' && (str[1] == 'x' || str[1] == 'X');
str += 2 * hex;
auto [ptr, ec] = from_chars( str, str + strlen( str ), value, !hex ?
10 : 16 );
return ec == errc() && !*ptr;
};
size_t end;
if( !parse( argv[1], end ) )
return EXIT_FAILURE;
// Require at least 2 and reject limits with the top bit set; the ++
// makes `end` an exclusive bound.
if( end < 2 || (ptrdiff_t)end++ < 0 )
throw bad_alloc();
unsigned
hc = jthread::hardware_concurrency(),
nThreads;
if( argc < 4 || !parse( argv[3], nThreads ) )
nThreads = hc;
if( !nThreads )
return EXIT_FAILURE;
using word_t = size_t;
constexpr size_t
BITS_PER_CL = CL_SIZE * 8,
BITS = sizeof(word_t) * 8,
DBITS = 2 * BITS;
// One bit per odd number below end; (end & 1 ^ 1) adds a spare bit for
// even limits.
size_t nBits = end / 2 + (end & 1 ^ 1);
// One cache line of sieve words; "ndi" = no default init, so the vector
// below doesn't value-initialize the large allocation before we fill it.
union alignas(CL_SIZE) ndi_words_cl { word_t words[CL_SIZE /
sizeof(word_t)]; ndi_words_cl() {} };
vector<ndi_words_cl> sieveCachelines( (nBits + BITS_PER_CL - 1) /
BITS_PER_CL );
// Flat word view; bit i set <=> 2*i+1 is still a prime candidate.
span<word_t> sieve( &sieveCachelines[0].words[0], (nBits + BITS - 1) /
BITS );
// All odd numbers start out as candidates.
fill( sieve.begin(), sieve.end(), (word_t)-1 );
size_t sqrt = (ptrdiff_t)ceil( ::sqrt( (ptrdiff_t)end ) );
// Clear the bits for start, start+2*?*prime... — start must be ODD; the
// bit stride `prime` equals a numeric stride of 2*prime. The unused first
// parameter only tags the two instantiations.
auto punch = [&]( auto, size_t start, size_t end, size_t prime )
{
assert(start & 1);
size_t bit = start / 2;
end = end / 2 + (end & 1 ^ 1);
if( bit >= end )
return;
// Bit strides of at least a word touch each word no more than once.
if( prime >= BITS ) [[likely]]
do [[likely]]
sieve[bit / BITS] &= rotl( (word_t)-2, bit % BITS );
while( (bit += prime) < end );
else
{
// Small strides hit the same word repeatedly: combine all clear-masks
// in a register and store once. A rotated mask comparing smaller than
// its predecessor signals wrap-around into the next word.
auto pSieve = sieve.begin() + bit / BITS;
do [[likely]]
{
word_t
word = *pSieve,
mask = rotl( (word_t)-2, bit % BITS ),
oldMask;
do
word &= mask,
oldMask = mask,
mask = rotl( mask, prime % BITS ),
bit += prime;
while( mask < oldMask );
*pSieve++ = word;
} while( bit < end );
}
};
// Sequential base phase over the odd candidates below sqrt.
for( size_t prime = 3; prime < sqrt; prime += 2 ) [[likely]]
{
// NOTE(review): pSieve is never advanced when a word is exhausted, so the
// next pass re-reads the same word after rounding `prime` up — verify
// this cannot mis-select a base prime.
auto pSieve = sieve.begin() + prime / DBITS;
do [[likely]]
if( word_t w = *pSieve >> prime / 2 % BITS; w ) [[likely]]
{
prime += 2 * countr_zero( w );
break;
}
while( (prime = (prime + DBITS & -(ptrdiff_t)DBITS) + 1) < sqrt );
if( prime >= sqrt ) [[unlikely]]
break;
// Multiples below prime*prime were already punched by smaller primes.
punch( false_type(), prime * prime, sqrt, prime );
}
// Enumerate every surviving odd number in [start, end); start must be ODD.
// fn receives the numeric value 2*bit+1.
auto scan = [&]( size_t start, size_t end, auto fn )
{
assert(start & 1);
start /= 2;
end = end / 2 + (end & 1 ^ 1);
if( start >= end )
return;
auto pSieve = sieve.begin() + start / BITS;
for( size_t bit = start; ; )
{
word_t word = *pSieve++ >> bit % BITS;
bool hasBits = word;
for( unsigned gap; word; word >>= gap, word >>= 1 ) [[likely]]
{
gap = countr_zero( word );
if( (bit += gap) >= end ) [[unlikely]]
return;
fn( bit * 2 + 1, true_type() );
if( ++bit >= end ) [[unlikely]]
return;
}
if( bit >= end ) [[unlikely]]
break;
// Advance to the next word boundary.
bit = (bit + BITS - hasBits) & -(ptrdiff_t)BITS;
}
};
static auto dbl = []( ptrdiff_t x ) { return (double)x; };
// NOTE(review): "threadTange" looks like a typo for "threadRange".
double threadTange = dbl( end - sqrt ) / nThreads;
using range_t = pair<size_t, size_t>;
vector<pair<size_t, size_t>> ranges;
ranges.reserve( nThreads );
// Partition [sqrt, end) into per-thread ranges, aligned down to cache-line
// boundaries (x2 because one bit now covers two numbers).
for( size_t t = nThreads, rEnd = end, rStart; t--; rEnd = rStart )
[[likely]]
{
rStart = sqrt + (ptrdiff_t)((ptrdiff_t)t * threadTange + 0.5);
size_t rClStart = rStart & -(2 * CL_SIZE * 8);
rStart = rClStart >= sqrt ? rClStart : sqrt;
if( rStart < rEnd )
ranges.emplace_back( rStart, rEnd );
}
// Per-slice working set: about a third of the L2, halved again when SMT
// siblings likely share the same L2.
double maxCacheRange = dbl( getL2Size() * (8 * 2) ) / 3 * (smt() &&
nThreads > hc / 2 ? 1 : 2);
vector<jthread> threads;
// NOTE(review): underflows if ranges is empty (very small limits) — verify.
threads.reserve( ranges.size() - 1 );
for( range_t const &range : ranges )
{
// Punch all base primes into one thread's range, processed top-down in
// cache-sized slices.
auto cacheAwarePunch = [&]( size_t rStart, size_t rEnd )
{
double thisThreadRange = dbl( rEnd - rStart );
ptrdiff_t nCachePartitions = (ptrdiff_t)ceil( thisThreadRange /
maxCacheRange );
double cacheRange = thisThreadRange / dbl( nCachePartitions );
for( size_t p = nCachePartitions, cacheEnd = rEnd, cacheStart; p--;
cacheEnd = cacheStart ) [[likely]]
{
cacheStart = rStart + (ptrdiff_t)((double)(ptrdiff_t)p * cacheRange
+ 0.5);
cacheStart &= -(2 * CL_SIZE * 8);
// Re-enumerate the base primes for every slice; the sieve below
// sqrt is final at this point.
scan( 3, sqrt,
[&]( size_t prime, auto )
{
// First multiple of prime at or above the slice start, forced odd
// (even multiples have no bit in this layout).
size_t start = ((cacheStart >= sqrt ? cacheStart : sqrt) + prime -
1) / prime * prime;
start = start & 1 ? start : start + prime;
punch( true_type(), start, cacheEnd, prime );
} );
}
};
// The last range is processed on the main thread instead of spawning.
if( &range != &ranges.back() )
threads.emplace_back( cacheAwarePunch, range.first, range.second );
else
cacheAwarePunch( range.first, range.second );
}
// jthread joins in its destructor; clearing the vector waits for all work.
threads.resize( 0 );
// An empty file-name argument suppresses output entirely.
if( argc >= 3 && !*argv[2] )
return EXIT_SUCCESS;
ofstream ofs;
ofs.exceptions( ofstream::failbit | ofstream::badbit );
ofs.open( argc >= 3 ? argv[2] : "primes.txt", ofstream::binary |
ofstream::trunc );
// Manual output buffering: flush once BUF_SIZE is filled; AHEAD leaves
// room for one more number plus its newline beyond the flush mark.
constexpr size_t
BUF_SIZE = 0x100000,
AHEAD = 32;
union ndi_char { char c; ndi_char() {} };
vector<ndi_char> rawPrintBuf( BUF_SIZE + AHEAD - 1 );
span printBuf( &rawPrintBuf.front().c, &rawPrintBuf.back().c + 1 );
auto
bufBegin = printBuf.begin(),
bufEnd = bufBegin,
bufFlush = bufBegin + BUF_SIZE;
auto print = [&]() { ofs << string_view( bufBegin, bufEnd ); };
auto printPrime = [&]( size_t prime, auto )
{
auto [ptr, ec] = to_chars( &*bufEnd, &bufEnd[AHEAD - 1], prime );
if( ec != errc() ) [[unlikely]]
throw system_error( (int)ec, generic_category(), "converson failed" );
bufEnd = ptr - &*bufBegin + bufBegin; // pointer to iterator - NOP
*bufEnd++ = '\n';
if( bufEnd >= bufFlush ) [[unlikely]]
print(), bufEnd = bufBegin;
};
// 2 is the only even prime and has no bit in the odd-only sieve.
printPrime( 2, false_type() );
scan( 3, end, printPrime );
// Write out the remainder of the buffer.
print();
}

// Thin wrapper over the compiler-specific CPUID intrinsic.
// Returns {EAX, EBX, ECX, EDX} for the given leaf `code`.
array<unsigned, 4> cpuId( unsigned code )
{
array<unsigned, 4> regs;
#if defined(_MSC_VER)
__cpuid( (int *)&regs[0], code );
#elif defined(__GNUC__) || defined(__clang__)
__cpuid(code, regs[0], regs[1], regs[2], regs[3]);
#endif
return regs;
}

// True when CPUID leaf 1 reports the HTT/SMT capability flag (EDX bit 28);
// false when even leaf 1 is unavailable.
bool smt()
{
	bool leafAvailable = cpuId( 0 )[0] >= 1;
	return leafAvailable && (cpuId( 1 )[3] >> 28 & 1);
}

// L2 cache size in bytes via CPUID leaf 0x80000006 (ECX[31:16] = size in
// KB), falling back to 512 KiB when that extended leaf is unsupported.
size_t getL2Size()
{
	constexpr unsigned L2_LEAF = 0x80000006u;
	if( cpuId( 0x80000000u )[0] < L2_LEAF )
		return 512ull * 1024;
	size_t kib = cpuId( L2_LEAF )[2] >> 16;
	return kib * 1024;
}

Now the code runs 0.15s on the 16 core Zen4 system, that's +87% faster.
On the 64 core Zen2 system the time to produce the first 21 million
primes is about 0.05 seconds.

Vir Campestris

unread,
Dec 21, 2023, 10:30:26 AM12/21/23
to
On 20/12/2023 12:44, Bonita Montero wrote:
> Now the code runs 0.15s on the 16 core Zen4 system, that's +87% faster.
> On th4e 64 core Zen2 system the time to produce the first 21 million
> primes is about 0.05 seconds.

I thought I'd try it.

Can I make a suggestion? Where it says

if( argc < 2 )
return EXIT_FAILURE;

make it instead print a user guide?

On my system my code takes about 20 seconds to produce 1e9 primes. It's
single threaded. Yours is taking a little over 6, but about 18 seconds
of CPU time. (I have 4 cores, each with 2 threads. Mine is designed to
be cool and quiet...)

I've obviously got something wrong though - I'm using a _lot_ more
memory than you.

Out of interest - I thought you must be Spanish from your name. And yet
you speak fluent German?

Andy

Bonita Montero

unread,
Dec 21, 2023, 11:07:58 AM12/21/23
to
Am 21.12.2023 um 16:30 schrieb Vir Campestris:

> Can I make a suggestion? Where it says
>
>     if( argc < 2 )
>         return EXIT_FAILURE;
>
> make it instead print a user guide?

The code isnt for an application which is ready-to-use but just
a technology experiment.

> On my system my code takes about 20 seconds to produce 1e9 primes. It's
> single threaded. Yours is taking a little over 6, but about 18 seconds
> of CPU time. (I have 4 cores, each with 2 threads. Mine is designed to
> be cool and quiet...)

bg --times --high bitmapSieve 0x1000000000 "" // bg is my own tool
real 2.471s
user 1m:0.313s
sys 0.234s
cylces 273.769.270.155

As you can see my code spends a minute of CPU time in about 2.5 seconds.
Interestingly the whole code largely depends on the performance of the
L2 cache. If more than one of my punch-threads occupies one core there
is almost no performance-improvement since both threads depend on the
same L2 cache.

> I've obviously got something wrong though - I'm using a _lot_ more
> memory than you.

I'm using a bitmap to store the odd values.

> Out of interest - I thought you must be Spanish from your name.
> And yet you speak fluent German?

My parents come from Peru.


Bonita Montero

unread,
Dec 21, 2023, 11:13:52 AM12/21/23
to
Am 21.12.2023 um 16:30 schrieb Vir Campestris:

> On my system my code takes about 20 seconds to produce 1e9 primes. It's
> single threaded. Yours is taking a little over 6, but about 18 seconds
> of CPU time. (I have 4 cores, each with 2 threads. Mine is designed to
> be cool and quiet...)

I forgot to mention that I'm using "" as the second commandline
parameter, which suppresses the file output. Producing a 2.2GB
file from all primes that fit in a 32 bit number costs an extra
two seconds for my system with a PCIe 4.0 SSD from Samsung.


Chris M. Thomasson

unread,
Dec 21, 2023, 5:23:18 PM12/21/23
to
On 12/10/2023 1:46 AM, Bonita Montero wrote:
> union alignas(CL_SIZE) ndi_words_cl { word_t words[CL_SIZE /
> sizeof(word_t)]; ndi_words_cl() {} };

alignas is pretty damn convenient. Iirc, it pads and aligns? Iirc, it
even works with std::vector.

Bonita Montero

unread,
Dec 21, 2023, 10:29:14 PM12/21/23
to
Am 21.12.2023 um 23:23 schrieb Chris M. Thomasson:

> alignas is pretty damn convenient. Iirc, it pads and aligns? Iirc, it
> even works with std::vector.

The problem with that is that I can't have an alignas on a word_t since
the rest of the cacheline would be padded. So I join as much word_t's as
possible in a single ndi_word_s_cl and have a span later on it to have
full iterator debugging.

Chris M. Thomasson

unread,
Dec 21, 2023, 11:02:22 PM12/21/23
to
Yup. This is a good practice indeed as long as they are related
logically, it will be beneficial to do this. Say a cache line is say 64
bytes and a word of 8 bytes, can be packed 8 fulls words indeed. No
cache line straddling allowed, damn it!

aligned on a l2 cache line boundary:

struct l2_cache_line
{
word m_words[8];
};

Fine. I think alignas auto packs. I need to bust out the most recent
C++ mode on my compilers to revisit it. Actually, I need to do it
anyway. The fun part is that I have old code that is interesting, well
before alignas:

https://pastebin.com/raw/f37a23918

Fun times! :^)


Bonita Montero

unread,
Dec 22, 2023, 11:55:30 AM12/22/23
to
Am 22.12.2023 um 05:02 schrieb Chris M. Thomasson:

> Fine. I think alignas auto packs. I need to to bust out the most recent
> C++ mode on my compilers to revisit it. Actually, I need to do it
> anyway. The fun part is that I have old code that is interesting, well
> before alignas:

False-sharing wouldn't hurt my algorithm much since only the beginning
and the end of the thread-local segment overlap with other threads; but
I do it anyway to have maximum performance.


Tim Rentsch

unread,
Dec 23, 2023, 1:21:17 PM12/23/23
to
Vir Campestris <vir.cam...@invalid.invalid> writes:

> On my system my code takes about 20 seconds to produce 1e9
> primes. [...]

Do you mean 1e9 primes or just the primes less than 1e9? To
do the first 1e9 primes a sieve would need to go up to about
23.9e9 (so half that many bits if only odd numbers were
represented).

Tim Rentsch

unread,
Dec 23, 2023, 1:30:33 PM12/23/23
to
A more compact form is to consider numbers mod 30, in which case
numbers that are 0 mod {2, 3, or 5} can be ignored. Conveniently
there are just 8 such values - 1, 7, 11, 13, 17, 19, 23, and 29 -
so each block of 30 can be represented in one 8-bit byte. Doing
that gives a 25% reduction in space compared to a 6n+/-1 scheme.

Chris M. Thomasson

unread,
Dec 23, 2023, 3:52:37 PM12/23/23
to
That's is a good habit to get into. :^)


Vir Campestris

unread,
Dec 23, 2023, 4:21:00 PM12/23/23
to
On 23/12/2023 18:30, Tim Rentsch wrote:
> A more compact form is to consider numbers mod 30, in which case
> numbers that are 0 mod {2, 3, or 5} can be ignored. Conveniently
> there are just 8 such values - 1, 7, 11, 13, 17, 19, 23, and 29 -
> so each block of 30 can be represented in one 8-bit byte. Doing
> that gives a 25% reduction in space compared to a 6n+/-1 scheme.

I found that on my system the modulo operation was so slow this wasn't
worth doing.

Andy

Vir Campestris

unread,
Dec 23, 2023, 4:21:32 PM12/23/23
to
Primes up to 1e9.

I have another idea though, watch this space...

Andy

Tim Rentsch

unread,
Dec 24, 2023, 3:36:56 AM12/24/23
to
Depending on how the code is written, no modulo operations need
to be done, because they will be optimized away and done at
compile time. If we look at multiplying two numbers represented
by bits in bytes i and j, the two numbers are

i*30 + a
j*30 + b

for some a and b in { 1, 7, 11, 13, 17, 19, 23, 29 }.

The values we're interested in are the index of the product and
the residue of the product, namely

(i*30+a) * (j*30+b) / 30 (for the index)
(i*30+a) * (j*30+b) % 30 (for the residue)

Any term with a *30 in the numerator doesn't contribute to
the residue, and also can be combined with the by-30 divide
for computing the index. Thus these expressions can be
rewritten as

i*30*j + i*b + j*a + (a*b/30) (for the index)
a*b%30 (for the residue)

When a and b have values that are known at compile time,
neither the divide nor the remainder result in run-time
operations being done; all of that heavy lifting is
optimized away and done at compile time. Of course there
are some multiplies, but they are cheaper than divides, and
also can be done in parallel. (The multiplication a*b also
can be done at compile time.)

The residue needs to be turned into a bit mask to do the
logical operation on the byte of bits, but here again that
computation can be optimized away and done at compile time.

Does that all make sense?

Bonita Montero

unread,
Dec 24, 2023, 5:03:50 AM12/24/23
to
I experimentally removed the masking of the lower bits of the
partition bounds according to the cacheline size and there was
no measurable performance-loss.

Tim Rentsch

unread,
Dec 24, 2023, 1:50:03 PM12/24/23
to
Does your system have enough memory to compute all the primes up
to 24e9? If it does I suggest that for your next milestone.

Chris M. Thomasson

unread,
Dec 24, 2023, 4:24:32 PM12/24/23
to
Still, imvvho, it _is_ a good practice to get into wrt padding and
aligning...

Bonita Montero

unread,
Dec 26, 2023, 12:00:12 AM12/26/23
to
Am 24.12.2023 um 22:24 schrieb Chris M. Thomasson:

> Still, imvvho, it _is_ a good practice to get into wrt padding and
> aligning...

With an upper bound of 2 ^ 32 I've got 131070 cachelines per thread.
If I have false sharing at the beginning and end of the range that
doesn't hurt much.

Chris M. Thomasson

unread,
Dec 26, 2023, 12:40:07 AM12/26/23
to
Remember when Intel first started hyperthreading and god damn threads
could false share with each other (on the stacks!) the low and high 64
byte parts of the 128 byte cache lines? A workaround was to artificially
offset the threads stacks via alloca.

Bonita Montero

unread,
Dec 26, 2023, 4:28:05 AM12/26/23
to
Am 26.12.2023 um 06:39 schrieb Chris M. Thomasson:

> Remember when Intel first started hyperthreading and god damn threads
> could false share with each other (on the stacks!) the low and high 64
> byte parts of the 128 byte cache lines? A workaround was to artificially
> offset the threads stacks via alloca.

If you have one core and two threads there's no false sharing.
It doesn't matter if the "conflicting" accesses come from either
thread or from one thread with that. False sharing is only pos-
sible with two cores or more.

Chris M. Thomasson

unread,
Dec 26, 2023, 3:25:08 PM12/26/23
to
On 12/26/2023 1:27 AM, Bonita Montero wrote:
> Am 26.12.2023 um 06:39 schrieb Chris M. Thomasson:
>
>> Remember when Intel first started hyperthreading and god damn threads
>> could false share with each other (on the stacks!) the low and high 64
>> byte parts of the 128 byte cache lines? A workaround was to
>> artificially offset the threads stacks via alloca.
>
> If you have one core and two threads there's no false sharing.

So, are you familiar with Intel's early hyper threading problem? There
was false sharing between the hyperthreads. The workaround did improve
performance by quite a bit. IIRC, my older appcore project had this
workaround incorporated into it logic. I wrote that sucker back in very
early 2000's. Humm... I will try to find the exact line.

Kaz Kylheku

unread,
Dec 26, 2023, 6:35:22 PM12/26/23
to
On 2023-12-26, Chris M. Thomasson <chris.m.t...@gmail.com> wrote:
> On 12/26/2023 1:27 AM, Bonita Montero wrote:
>> Am 26.12.2023 um 06:39 schrieb Chris M. Thomasson:
>>
>>> Remember when Intel first started hyperthreading and god damn threads
>>> could false share with each other (on the stacks!) the low and high 64
>>> byte parts of the 128 byte cache lines? A workaround was to
>>> artificially offset the threads stacks via alloca.
>>
>> If you have one core and two threads there's no false sharing.
>
> So, are you familiar with Intel's early hyper threading problem? There
> was false sharing between the hyperhtreads. The workaround did improve
> performance by quite a bit. IIRC, my older appcore project had this
> workaround incorporated into it logic. I wrote that sucker back in very
> early 2000's. Humm... I will try to find the exact line.

Could you be both right? The performance problem was real, but maybe
it wasn't "false sharing"? The hyper-thread are the same core; they
share the same caches.

Might you be describing a cache collision rather than false sharing?

A single processor can trigger degenerate cache uses (at any level
of a cache hierarchy). For instance, if pages of virtual memory
are accessed in certain patterns, they can trash the same TLB entry.

Was it perhaps the case that these thread stacks were allocated at such
a stride, that their addresses clashed on the same cache line set?

That could be a problem even on one processor, but it's obvious that
hyperthreading could exacerbate it because switches between hyperthreads
happen on a fine granularity. They don't get to run a full
scheduler-driven time quantum. A low-level processor event like a
pipeline stall (or other resource issue) can drive a hyper thread
switch.

--
TXR Programming Language: http://nongnu.org/txr
Cygnal: Cygwin Native Application Library: http://kylheku.com/cygnal
Mastodon: @Kazi...@mstdn.ca
NOTE: If you use Google Groups, I don't see you, unless you're whitelisted.

Chris M. Thomasson

unread,
Dec 26, 2023, 6:37:35 PM12/26/23
to
On 12/26/2023 3:35 PM, Kaz Kylheku wrote:
> On 2023-12-26, Chris M. Thomasson <chris.m.t...@gmail.com> wrote:
>> On 12/26/2023 1:27 AM, Bonita Montero wrote:
>>> Am 26.12.2023 um 06:39 schrieb Chris M. Thomasson:
>>>
>>>> Remember when Intel first started hyperthreading and god damn threads
>>>> could false share with each other (on the stacks!) the low and high 64
>>>> byte parts of the 128 byte cache lines? A workaround was to
>>>> artificially offset the threads stacks via alloca.
>>>
>>> If you have one core and two threads there's no false sharing.
>>
>> So, are you familiar with Intel's early hyper threading problem? There
>> was false sharing between the hyperhtreads. The workaround did improve
>> performance by quite a bit. IIRC, my older appcore project had this
>> workaround incorporated into it logic. I wrote that sucker back in very
>> early 2000's. Humm... I will try to find the exact line.
>
> Could you be both right? The performance problem was real, but maybe
> it wasn't "false sharing"? The hyper-thread are the same core; they
> share the same caches.
>
> Might you be describing a cache collision rather than false sharing?

Iirc, when I read the docs from Intel, it was the low 64 bytes being
falsely shared with the high 64 bytes of a 128 byte l2 cache line?

Bonita Montero

unread,
Dec 27, 2023, 12:06:39 AM12/27/23
to
Am 26.12.2023 um 21:24 schrieb Chris M. Thomasson:

> So, are you familiar with Intel's early hyper threading problem?
> There was false sharing between the ...

False sharing can only happen between different cores.

Chris M. Thomasson

unread,
Dec 27, 2023, 1:00:10 AM12/27/23
to
On 12/26/2023 3:37 PM, Chris M. Thomasson wrote:
> On 12/26/2023 3:35 PM, Kaz Kylheku wrote:
>> On 2023-12-26, Chris M. Thomasson <chris.m.t...@gmail.com> wrote:
>>> On 12/26/2023 1:27 AM, Bonita Montero wrote:
>>>> Am 26.12.2023 um 06:39 schrieb Chris M. Thomasson:
>>>>
>>>>> Remember when Intel first started hyperthreading and god damn threads
>>>>> could false share with each other (on the stacks!) the low and high 64
>>>>> byte parts of the 128 byte cache lines? A workaround was to
>>>>> artificially offset the threads stacks via alloca.
>>>>
>>>> If you have one core and two threads there's no false sharing.
>>>
>>> So, are you familiar with Intel's early hyper threading problem? There
>>> was false sharing between the hyperhtreads. The workaround did improve
>>> performance by quite a bit. IIRC, my older appcore project had this
>>> workaround incorporated into it logic. I wrote that sucker back in very
>>> early 2000's. Humm... I will try to find the exact line.
>>
>> Could you be both right? The performance problem was real, but maybe
>> it wasn't "false sharing"? The hyper-thread are the same core; they
>> share the same caches.
>>
>> Might you be describing a cache collision rather than false sharing?
>
> Iirc, when I read the docs from Intel, it was the low 64 bytes being
> falsely shared with the high 64 bytes of a 128 byte l2 cache line?

Something about false interference between threads that should not even
be interacting with one another to begin with. It was a problem. The fix
was based on alloca, so that is something to ponder on.

Bonita Montero

unread,
Dec 27, 2023, 4:23:29 AM12/27/23
to
Am 27.12.2023 um 06:59 schrieb Chris M. Thomasson:

> Something about false interference between threads that should not even
> be interacting with one another to begin with. It was a problem. The fix
> was based on alloca, so that is something to ponder on.;

Like with any SMT-core there could be cache-thrashing between the
cores. The L1 data cache was only 8kB, maybe only two-way associative,
so that could be thrashing between the cores. But I've no clue what this
would have to do with alloca().

Kaz Kylheku

unread,
Dec 27, 2023, 3:49:17 PM12/27/23
to
Say you have thread stacks that are offset by some power of two amount,
like a megabyte. Stack addresses at the same depth (like the local
variables of threads executing the same function) are likely to collide
on the same cache set (at different levels of the caching hierarchy: L1,
L2, translation caches).

With alloca, since it moves the stack pointer, we can carve variable
amounts of stack space to randomize the stack offsets (before calling
the work functions). In different threads, we use differently-sized
alloca allocations.

Bonita Montero

unread,
Dec 28, 2023, 6:00:37 AM12/28/23
to
Am 27.12.2023 um 21:49 schrieb Kaz Kylheku:

> Say you have thread stacks that are offset by some power of two amount,
> like a megabyte. Stack addresses at the same depth (like the local
> variables of threads executing the same function) are likely to collide
> on the same cache set (at different levels of the caching hierachy: L1,
> L2, translation caches).
>
> With alloca, since it moves the stack pointer, we can carve variable
> amounts of stack space to randomize the stack offsets (before calling
> the work functions). In different threads, we use differently-sized
> alloca allocations.

That's not sth. alloca() could alleviate. The startup-code inside the
userspace-part of the thread should randomize the starting address of
the stack within the size of a set.
Most resources on the net say that the Pentium 4's L1 data cache asso-
ciativity is eight, so there's not much chance of aliasing inside the
L1D cache.

Chris M. Thomasson

unread,
Dec 28, 2023, 6:38:14 PM12/28/23
to
The use of alloca to try to get around the problem in their (Intel's)
early hyperthreaded processors was real, and actually helped. It was in
the Intel docs.

Bonita Montero

unread,
Dec 28, 2023, 10:17:17 PM12/28/23
to
Am 29.12.2023 um 00:38 schrieb Chris M. Thomasson:

> The use of alloca to try to get around the problem in their (Intel's)
> early hyperthreaded processors was real, and actually helped. It was in
> the Intel docs.

I don't believe it.

Chris M. Thomasson

unread,
Dec 28, 2023, 11:59:02 PM12/28/23
to
Why not?

Bonita Montero

unread,
Dec 29, 2023, 4:58:38 AM12/29/23
to
Because I don't understand what's different with the access pattern
of alloca() and usual stack allocation. And if I google for "alloca
Pentium 4 site:intel.com" I can't find anything that fits.

David Brown

unread,
Dec 29, 2023, 7:57:11 AM12/29/23
to
There is nothing different from alloca() and ordinary stack allocations.
But alloca() makes it quick and easy to make large allocations, and to
do so with random sizes (or at least sizes that differ significantly
between threads, even if they are running the same code). Without
alloca(), you'd need to do something like arranging to call a recursive
function a random number of times before it then calls the next bit of
code in your thread. alloca() is simply far easier and faster.

Scott Lurndal

unread,
Dec 29, 2023, 11:02:05 AM12/29/23
to
See page 2-35 in the _Intel Pentium 4 Processor Optimization_
manual.

Bonita Montero

unread,
Dec 29, 2023, 11:04:40 AM12/29/23
to
Am 29.12.2023 um 13:56 schrieb David Brown:

> There is nothing different from alloca() and ordinary stack allocations.
> But alloca() makes it quick and easy to make large allocations, and to
> do so with random sizes (or at least sizes that differ significantly
> between threads, even if they are running the same code).

I've got my own class overflow_array<> which is like an array<> and a
vector<>. If you append more than the internal array<> can handle the
objects are moved to an internal vector. I think Boost's small_array
is similar to that.

> Without alloca(), you'd need to do something like arranging to call a
> recursive function a random number of times before it then calls the
> next bit of code in your thread.  alloca() is simply far easier and
> faster.

You've got strange ideas. alloca() has been completely removed from the
Linux kernel. The point is that if there's a fixed upper limit you would
allocate you could allocate it always statically.


Bonita Montero

unread,
Dec 29, 2023, 11:06:54 AM12/29/23
to
Am 29.12.2023 um 17:01 schrieb Scott Lurndal:

> See page 2-35 in the _Intel Pentium 4 Processor Optimization_
> manual.

Where can I find sth. referring to alloca() there ?

Kaz Kylheku

unread,
Dec 29, 2023, 12:29:13 PM12/29/23
to