diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index eee7928..3984c5a 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -639,11 +639,30 @@
}
}
-// Expose freelive, freeSized, and freeDeferred for testing.
+// Expose freeSized for testing.
func FreeSized(p unsafe.Pointer, size uintptr, noscan bool) {
freeSized(p, size, noscan)
}
+// Expose gcAssistBytes for the current g for testing.
+func AssistCredit() int64 {
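+ // Use the current user goroutine's gcAssistBytes, even if we happen to be called on the system stack.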
+ assistG := getg()
+ if assistG.m.curg != nil {
+ assistG = assistG.m.curg
+ }
+ return assistG.gcAssistBytes
+}
+
+// Expose gcBlackenEnabled for testing.
+func GcBlackenEnable() bool {
+ // Note we do a non-atomic load here.
+ // Some checks against gcBlackenEnabled (e.g., in mallocgc)
+ // are currently done via non-atomic load for performance reasons,
+ // but other checks are done via atomic load (e.g., in mgcmark.go),
+ // so interpreting this value in a test may be subtle.
+ return gcBlackenEnabled != 0
+}
+
const (
PageSize = pageSize
PallocChunkPages = pallocChunkPages
diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go
index c6ea819..13864b7 100644
--- a/src/runtime/malloc_test.go
+++ b/src/runtime/malloc_test.go
@@ -16,6 +16,7 @@
"reflect"
"runtime"
. "runtime"
+ "runtime/metrics"
"strings"
"sync"
"sync/atomic"
@@ -250,6 +251,7 @@
{"size=500", testFreeSized[[500]byte], true},
{"size=512", testFreeSized[[512]byte], true},
{"size=4096", testFreeSized[[4096]byte], true},
+ {"size=20000", testFreeSized[[20000]byte], true}, // big, not power of 2, not size class boundary
{"size=32KiB-8", testFreeSized[[1<<15 - 8]byte], true}, // max noscan small object for 64-bit
}
@@ -487,6 +489,121 @@
wg.Wait()
}
})
+
+ t.Run("assist-credit", func(t *testing.T) {
+ // Allocate and free objects of the same size class repeatedly while
+ // verifying that this results in a net zero change in assist credit.
+ // This helps double-check our manipulation of the assist credit
+ // during mallocgc/free, including cases with internal fragmentation,
+ // where the requested mallocgc size is smaller than the size class.
+ //
+ // Two main cases of current interest are when gcBlackenEnabled is false
+ // and when it is true. The test attempts to exercise both by making
+ // sure some normal, never-freed allocs are also occurring.
+ //
+ // Verifying the desired behavior is a bit subtle. It would be nice if
+ // we could just measure the assist credit before and after an alloc/free pair
+ // and expect no net change, but that is insufficient. A GC might
+ // start or end, including between the alloc and free, or
+ // the allocation might cause a GC assist.
+ //
+ // We attempt to be resilient against such measurement interference
+ // by not reporting some measurements as failures, but we skip
+ // infrequently enough that we can hopefully still catch bugs.
+ // In particular, this test currently does fail if we purposefully
+ // introduce bugs such as never adjusting the assist credit when
+ // reusing an object, always adjusting it, or mishandling
+ // internal fragmentation (e.g., adjusting on reuse by the requested
+ // alloc size rather than the mspan allocation slot size).
+ //
+ // Finally, note that t.Logf can cause allocations and possibly start a GC
+ // or cause a GC assist, so some care is needed if modifying the logging here.
+
+ // If making changes related to this test, consider testing locally with
+ // larger counts, like 100K or 1M.
+ counts := []int{1, 2, 5, 10, 100, 1000 * stressMultiple}
+ // Dropping down to GOMAXPROCS=1 might help reduce noise.
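+ // (GOMAXPROCS returns the previous setting, which the deferred call restores when this subtest returns.)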
+ defer GOMAXPROCS(GOMAXPROCS(1))
+ size := int64(unsafe.Sizeof(*new(T)))
+ for _, count := range counts {
+ // Start by forcing a GC to reset this g's assist credit
+ // and perhaps help us get a cleaner measurement of GC cycle count.
+ runtime.GC()
+ gcCyclesStart := metricReadUint64("/gc/cycles/total:gc-cycles")
+ deltaCount := 0
+ for i := range count {
+ // We do three allocations per loop, with the third allocation
+ // being the one we measure.
+
+ // The first allocation is without a corresponding free.
+ // (Otherwise, if we only had alloc/free pairs, we would accumulate
+ // little or no garbage and would not trigger any GCs in practice.
+ // We do an allocation that is 2x larger than our test allocation
+ // so that it is in a different span class and does not steal a
+ // freed object from our measured allocation.)
+ Escape(&[2]T{})
+
+ // The second allocation tries to ensure there is at least one reusable
+ // object on the mspan's free list when we do our measured allocation.
+ p := alloc()
+ free(p)
+
+ // Finally, our measured allocation, bracketed by measurements.
+ creditStart := AssistCredit()
+ blackenEnableStart := GcBlackenEnable()
+ p = alloc()
+ blackenEnableAfterAlloc := GcBlackenEnable()
+ creditAfterAlloc := AssistCredit()
+ free(p)
+ blackenEnableEnd := GcBlackenEnable()
+ creditEnd := AssistCredit()
+
+ if creditStart == 0 || creditAfterAlloc == 0 {
+ // This might mean a GC happened, causing a discontinuity
+ // in our assist credit tracking, so skip this iteration.
+ // This check might not be strictly needed currently, but we include
+ // it to be conservative against flakes and possible future changes.
+ continue
+ }
+ if creditStart <= 2*size {
+ // The alloc might have triggered an assist, which is more complicated to analyze,
+ // so skip this iteration. If we have an error in our math, a different iteration
+ // should catch it. (The 2x size is conservative to accommodate rounding up to the size class.)
+ continue
+ }
+
+ delta := creditEnd - creditStart
+ if delta != 0 {
+ // Don't fail immediately. Instead, log it and count it so that we can
+ // compare the total count of deltas against our observed GC cycle count below.
+ t.Logf("assist credit change: %d (size=%d i=%d count=%d alloc delta=%d free delta=%d before=%d after=%d gcBlackenEnable:%v/%v/%v)",
+ delta, size, i, count, creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc, creditStart, creditEnd,
+ blackenEnableStart, blackenEnableAfterAlloc, blackenEnableEnd)
+ deltaCount++
+ }
+ }
+
+ // Done with this alloc/free loop.
+ // We allow up to one non-zero assist credit delta per GC cycle,
+ // and fail if we observed more than that. In other words, if a GC starts,
+ // disrupts a measurement, and causes us to increment deltaCount, we only want
+ // that GC to disrupt a single measurement; the remaining measurements
+ // while that GC is running should each show a net zero credit change,
+ // or else we report a failure. (Removing this leniency would currently cause flakes.
+ // We are also slightly more lenient than that description, because we compare
+ // the counts over the entire loop at the end rather than checking every GC cycle.)
+ gcCyclesEnd := metricReadUint64("/gc/cycles/total:gc-cycles")
+ gcCycles := gcCyclesEnd - gcCyclesStart
+ if gcCycles > 0 {
+ t.Logf("gc cycles=%d assist credit changes=%d (size=%d count=%d)", gcCycles, deltaCount, size, count)
+ }
+ if deltaCount > int(gcCycles) {
+ t.Fatalf("observed more assist credit changes (%d) than GC cycles (%d) (size=%d count=%d)",
+ deltaCount, gcCycles, size, count)
+ }
+ }
+ })
}
}
@@ -734,3 +851,12 @@
t.Errorf("_mkmalloc tests failed: %v", err)
}
}
+
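+// metricReadUint64 reads the named runtime/metrics metric and returns
+// its value as a uint64. It panics if the metric's kind is not KindUint64.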
+func metricReadUint64(name string) uint64 {
+ s := []metrics.Sample{{Name: name}}
+ metrics.Read(s)
+ if s[0].Value.Kind() != metrics.KindUint64 {
+ panic(fmt.Sprintf("metric %q has kind %v, want KindUint64", name, s[0].Value.Kind()))
+ }
+ return s[0].Value.Uint64()
+}