I found that redis-cli.c uses gettimeofday() to get the current time in microseconds.
1. gettimeofday() is marked obsolescent by POSIX, which recommends clock_gettime() instead.
2. In my tests gettimeofday() goes through a system call, while clock_gettime() is a library call in libc and is faster (see the rough benchmark sketch below).
I am sorry that I am not familiar with the Redis development process; I only noticed this while doing performance tests on a custom Linux kernel.
Please let me know if I am doing something wrong.
Also, I was not able to test the code on macOS because I do not have a Mac.
I would appreciate it if somebody could review that part.
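
For reference, here is a rough standalone sketch of the kind of micro-benchmark that shows the difference. It is illustrative only (the iteration count and the choice of CLOCK_MONOTONIC are mine, not the exact test from my kernel work):

    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>

    #define ITERATIONS 1000000

    int main(void) {
        struct timespec ts, t0, t1;
        struct timeval tv;
        long long i, elapsed_ns;

        /* Time ITERATIONS calls to gettimeofday(). */
        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (i = 0; i < ITERATIONS; i++) gettimeofday(&tv, NULL);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        elapsed_ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL + (t1.tv_nsec - t0.tv_nsec);
        printf("gettimeofday:  %lld ns per call\n", elapsed_ns / ITERATIONS);

        /* Time ITERATIONS calls to clock_gettime(CLOCK_MONOTONIC). */
        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (i = 0; i < ITERATIONS; i++) clock_gettime(CLOCK_MONOTONIC, &ts);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        elapsed_ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL + (t1.tv_nsec - t0.tv_nsec);
        printf("clock_gettime: %lld ns per call\n", elapsed_ns / ITERATIONS);
        return 0;
    }

It can be built with gcc -O2; on older glibc, clock_gettime may require linking with -lrt.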
---
src/redis-cli.c | 53 +++++++++++++++++++++++++++++++------------------
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/src/redis-cli.c b/src/redis-cli.c
index 3f5827fbe..6c459c452 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -37,6 +37,10 @@
#include <signal.h>
#include <unistd.h>
#include <time.h>
+#ifdef __MACH__
+#include <mach/clock.h>
+#include <mach/mach.h>
+#endif
#include <ctype.h>
#include <errno.h>
#include <sys/stat.h>
@@ -303,14 +307,28 @@ static void cliPushHandler(void *, void *);
uint16_t crc16(const char *buf, int len);
-static long long ustime(void) {
- struct timeval tv;
- long long ust;
+static long long nstime(void) {
+ struct timespec tv;
+ long long nst;
+#ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
+ // https://gist.github.com/jbenet/1087739
+ clock_serv_t cclock;
+ mach_timespec_t mts;
+ host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
+ clock_get_time(cclock, &mts);
+ mach_port_deallocate(mach_task_self(), cclock);
+ tv.tv_sec = mts.tv_sec;
+ tv.tv_nsec = mts.tv_nsec;
+#else
+ clock_gettime(CLOCK_MONOTONIC, &tv);
+#endif
+ nst = ((long long)tv.tv_sec)*1000000000;
+ nst += tv.tv_nsec;
+ return nst;
+}
- gettimeofday(&tv, NULL);
- ust = ((long long)tv.tv_sec)*1000000;
- ust += tv.tv_usec;
- return ust;
+static long long ustime(void) {
+ return nstime()/1000;
}
static long long mstime(void) {
@@ -9617,16 +9635,16 @@ static void sigIntHandler(int s) {
static void intrinsicLatencyMode(void) {
long long test_end, run_time, max_latency = 0, runs = 0;
- run_time = (long long)config.intrinsic_latency_duration * 1000000;
- test_end = ustime() + run_time;
+ run_time = (long long)config.intrinsic_latency_duration * 1000000000;
+ test_end = nstime() + run_time;
signal(SIGINT, longStatLoopModeStop);
while(1) {
long long start, end, latency;
- start = ustime();
+ start = nstime();
compute_something_fast();
- end = ustime();
+ end = nstime();
latency = end-start;
runs++;
if (latency <= 0) continue;
@@ -9634,18 +9652,17 @@ static void intrinsicLatencyMode(void) {
/* Reporting */
if (latency > max_latency) {
max_latency = latency;
- printf("Max latency so far: %lld microseconds.\n", max_latency);
+ printf("Max latency so far: %lld nanoseconds.\n", max_latency);
}
- double avg_us = (double)run_time/runs;
- double avg_ns = avg_us * 1e3;
+ double avg_ns = (double)run_time/runs;
if (force_cancel_loop || end > test_end) {
printf("\n%lld total runs "
"(avg latency: "
"%.4f microseconds / %.2f nanoseconds per run).\n",
- runs, avg_us, avg_ns);
+ runs, (avg_ns/1e3), avg_ns);
printf("Worst run took %.0fx longer than the average latency.\n",
- max_latency / avg_us);
+ max_latency / avg_ns);
exit(0);
}
}
@@ -9748,7 +9765,6 @@ void testHintSuite(char *filename) {
int main(int argc, char **argv) {
int firstarg;
- struct timeval tv;
memset(&config.sslconfig, 0, sizeof(config.sslconfig));
config.conn_info.hostip = sdsnew("127.0.0.1");
@@ -9856,8 +9872,7 @@ int main(int argc, char **argv) {
}
#endif
- gettimeofday(&tv, NULL);
- init_genrand64(((long long) tv.tv_sec * 1000000 + tv.tv_usec) ^ getpid());
+ init_genrand64(ustime() ^ getpid());
/* Cluster Manager mode */
if (CLUSTER_MANAGER_MODE()) {
--
2.31.1