I wrote a benchmark, but the results of the following three cases differ widely. The three benchmark bodies are identical except for what happens to the accumulator g after the loops. Which one is reliable?
package main

import (
	"runtime"
	"testing"
)

const N = 200 * 1000

type Element int64

var sum int

func Benchmark_MaybeOptimizedOut(b *testing.B) {
	var a [N]Element
	b.ResetTimer()
	var g int
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(a); j++ {
			g += int(a[j])
		}
	}
	// g is never read after the loop, so the compiler is free to drop the work.
}

func Benchmark_AssignToGlobal(b *testing.B) {
	var a [N]Element
	b.ResetTimer()
	var g int
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(a); j++ {
			g += int(a[j])
		}
	}
	sum = g // publish the result to a package-level variable
}

func Benchmark_KeepAlive(b *testing.B) {
	var a [N]Element
	b.ResetTimer()
	var g int
	for i := 0; i < b.N; i++ {
		for j := 0; j < len(a); j++ {
			g += int(a[j])
		}
	}
	runtime.KeepAlive(&g) // mark &g as reachable up to this point
}
/* output:
$ go test -bench=. -benchtime=3s
Benchmark_MaybeOptimizedOut-4   50000    87318 ns/op
Benchmark_AssignToGlobal-4      20000   205934 ns/op
Benchmark_KeepAlive-4           10000   620351 ns/op
*/
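For context, the approach I have seen suggested most often for keeping a benchmarked computation from being optimized away is to publish the result to a package-level sink, which is essentially what Benchmark_AssignToGlobal does. Below is a minimal standalone sketch of that pattern; the names data, sinkInt64, and BenchmarkSumToSink are my own for illustration, not from any standard API.

package main

import "testing"

const numElements = 200 * 1000

// data is at package scope so every iteration reads the same array.
var data [numElements]int64

// sinkInt64 is a package-level sink. Writing the final result to it makes the
// summation observable outside the function, which, as I understand it, keeps
// the compiler from treating the inner loop as dead code.
var sinkInt64 int64

func BenchmarkSumToSink(b *testing.B) {
	var total int64
	for i := 0; i < b.N; i++ {
		total = 0
		for j := 0; j < len(data); j++ {
			total += data[j]
		}
	}
	sinkInt64 = total
}

As with the code above, this needs to live in a file ending in _test.go for go test -bench=. to pick it up.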