On 4/27/2023 3:06 PM, David Jones wrote:
> I use the old Byte Magazine benchmark from the 1990s (bytemark, normalized to a 90 MHz Pentium). Running it on
> my laptop under VirtualBox with an Intel i5-1235U gets an integer score 6-7 times higher than my DS10L/466 Alpha.
> Interestingly, the floating point score is barely higher. Running the posted sieve code on both, the laptop is
> 11 times faster.
I use the code below, which I have not only in C but also in
other languages.
Arne
native_test.c
=============
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "high_res_timer.h"
/* Print throughput in millions of operations per second, given start/end timestamps and loop counts. */
void printres(TIMECOUNT_T t1, TIMECOUNT_T t2, int n1, int n2, char *ops)
{
    double xperf;

    xperf = (double)n1 * (double)n2 / ((t2 - t1) * 1.0 / UNITS_PER_SECOND);
    printf("%.2f million %s per second\n", xperf / 1000000, ops);
}
#define NINT 10000
#define NFP 1000
#define NSTR 100
#define N 1000000
void testint(int scale)
{
    int i, j;
    int nintscale, sum;
    TIMECOUNT_T t1, t2;

    nintscale = NINT / scale;
    t1 = GET_TIMECOUNT;
    for(i = 0; i < nintscale; i++)
    {
        sum = i;
        for(j = 0; j < N; j++)
        {
            /* (2 * sum + 3) / 2 truncates to sum + 1 in integer arithmetic */
            sum = ((sum + 1) * 2 + 1) / 2;
        }
        if(sum != (i + N))
        {
            printf("Integer test error\n");
            exit(0);
        }
    }
    t2 = GET_TIMECOUNT;
    printres(t1, t2, nintscale, N, "integer operations");
}
void testfp(int scale)
{
    int i, j;
    int nfpscale;
    double sum;
    TIMECOUNT_T t1, t2;

    nfpscale = NFP / scale;
    t1 = GET_TIMECOUNT;
    for(i = 0; i < nfpscale; i++)
    {
        sum = i;
        for(j = 0; j < N; j++)
        {
            /* the same expression evaluates to sum + 1.5 in floating point */
            sum = ((sum + 1) * 2 + 1) / 2;
        }
        if(fabs(sum - (i + 1.5 * N)) > 1)
        {
            printf("Floating point test error\n");
            exit(0);
        }
    }
    t2 = GET_TIMECOUNT;
    printres(t1, t2, nfpscale, N, "floating point operations");
}
#define ALFA "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
/* Build an N character string 10 characters at a time and spot-check the result. */
void teststr(int scale)
{
    int i, j;
    int ix, ix1, ix2;
    int nstrscale;
    char s[1000], buf[N+1];
    TIMECOUNT_T t1, t2;

    nstrscale = NSTR / scale;
    t1 = GET_TIMECOUNT;
    for(i = 0; i < nstrscale; i++)
    {
        strcpy(buf, "");
        for(j = 0; j < N; j = j + 10)
        {
            strcpy(s, ALFA);
            strcat(s, ALFA);
            ix = (i + j) % strlen(ALFA);
            strncat(buf + j, s + ix, 1);
            strncat(buf + j, s + ix + 1, 2);
            strncat(buf + j, s + ix + 3, 3);
            strncat(buf + j, s + ix + 6, 4);
        }
        ix1 = N / 3;
        ix2 = 2 * N / 3;
        if(strlen(buf) != N ||
           buf[ix1] != ALFA[(i + ix1) % strlen(ALFA)] ||
           buf[ix2] != ALFA[(i + ix2) % strlen(ALFA)])
        {
            printf("String test error\n");
            exit(0);
        }
    }
    t2 = GET_TIMECOUNT;
    printres(t1, t2, nstrscale, N / 10, "string operations");
}
#define REP 10
int main(int argc, char *argv[])
{
    int i;
    int scale;

    printf("%d bit\n", (int)(sizeof(char *) * 8));
    if(argc > 1)
    {
        scale = atoi(argv[1]);
    }
    else
    {
        scale = 1;
    }
    for(i = 0; i < REP; i++)
    {
        testint(scale);
    }
    for(i = 0; i < REP; i++)
    {
        testfp(scale);
    }
    for(i = 0; i < REP; i++)
    {
        teststr(scale);
    }
    return 0;
}
high_res_timer.h
================
#if defined(__vms) && (defined(__alpha) || defined(__ia64))
#include <starlet.h>
static long long int vms_get_timecount(void)
{
    long int res;
    long long int t;

    res = sys$gettim(&t);
    /* on VMS the low bit of the status value indicates success */
    return ((res & 1) == 1) ? t : 0;
}
#define TIMECOUNT_T long long int
#define GET_TIMECOUNT vms_get_timecount()
#define UNITS_PER_SECOND 10000000
#endif
#ifdef __unix
#include <time.h>
static long long int unix_get_timecount(void)
{
    int res;
    struct timespec t;

    res = clock_gettime(CLOCK_MONOTONIC, &t);
    return (res == 0) ? (t.tv_sec * 1000000000LL + t.tv_nsec) : 0;
}
#define TIMECOUNT_T long long int
#define GET_TIMECOUNT unix_get_timecount()
#define UNITS_PER_SECOND 1000000000
#endif
#ifdef _WIN32
#include <windows.h>
static long long int win32_get_timecount(void)
{
    BOOL res;
    LARGE_INTEGER t;

    res = QueryPerformanceCounter(&t);
    return res ? t.QuadPart : 0;
}
static long long int win32_units_per_second(void)
{
    BOOL res;
    LARGE_INTEGER t;

    res = QueryPerformanceFrequency(&t);
    return res ? t.QuadPart : 0;
}
#define TIMECOUNT_T long long int
#define GET_TIMECOUNT win32_get_timecount()
#define UNITS_PER_SECOND win32_units_per_second()
#endif
#ifndef TIMECOUNT_T
#error "TIMECOUNT_T not defined"
#endif
#ifndef GET_TIMECOUNT
#error "GET_TIMECOUNT not defined"
#endif
#ifndef UNITS_PER_SECOND
#error "UNITS_PER_SECOND not defined"
#endif
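
If anyone wants to reuse just the timer header for their own measurements, something
along these lines should work. The file name timer_demo.c and the dummy loop are only
illustrative; substitute whatever you actually want to time.

timer_demo.c
============
#include <stdio.h>
#include "high_res_timer.h"

int main(void)
{
    TIMECOUNT_T t1, t2;
    volatile double x = 0.0; /* volatile so the dummy loop is not optimized away */
    int i;

    t1 = GET_TIMECOUNT;
    for(i = 0; i < 10000000; i++)
    {
        x = x + 1.0;
    }
    t2 = GET_TIMECOUNT;
    /* convert timer ticks to seconds using the platform specific resolution */
    printf("elapsed: %.3f seconds\n", (t2 - t1) * 1.0 / UNITS_PER_SECOND);
    return 0;
}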