runtime,runtime/pprof: clean up goroutine leak profile writing
Cleaned up goroutine leak profile extraction by removing the acquisition
of the goroutineProfile semaphore and inlining the call to saveg when
recording stacks; the former use of doRecordGoroutineProfile had side
effects on goroutineProfile fields.

Added regression tests for the goroutine leak profile frontend, covering
the binary and debug=1 profile formats.

Added stress tests for concurrent goroutine and goroutine leak profile
requests.
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 19e90fb..54fdc35 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -1261,7 +1261,7 @@
//go:linkname pprof_goroutineLeakProfileWithLabels
func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
- return goroutineLeakProfileWithLabelsConcurrent(p, labels)
+ return goroutineLeakProfileWithLabels(p, labels)
}
// labels may be nil. If labels is non-nil, it must have the same length as p.
@@ -1323,30 +1323,30 @@
return work.goroutineLeak.count, false
}
- // Use the same semaphore as goroutineProfileWithLabelsConcurrent,
- // because ultimately we still use goroutine profiles.
- semacquire(&goroutineProfile.sema)
-
// Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
// save the current goroutine stack, because it is obviously not leaked.
-
+ // We also do not need to acquire any semaphores on goroutineProfile, because
+ // we don't use it for storage.
pcbuf := makeProfStack() // see saveg() for explanation
// Prepare a profile large enough to store all leaked goroutines.
n = work.goroutineLeak.count
if n > len(p) {
- // There's not enough space in p to store the whole profile, so (per the
- // contract of runtime.GoroutineProfile) we're not allowed to write to p
- // at all and must return n, false.
- semrelease(&goroutineProfile.sema)
+ // There's not enough space in p to store the whole profile, so
+ // we're not allowed to write to p at all and must return n, false.
return n, false
}
// Visit each leaked goroutine and try to record its stack.
+ var offset int
forEachGRace(func(gp1 *g) {
if readgstatus(gp1) == _Gleaked {
- doRecordGoroutineProfile(gp1, pcbuf)
+ systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) })
+ if labels != nil {
+ labels[offset] = gp1.labels
+ }
+ offset++
}
})
@@ -1354,7 +1354,6 @@
raceacquire(unsafe.Pointer(&labelSync))
}
- semrelease(&goroutineProfile.sema)
return n, true
}
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index b816833..7f807e9 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -11,6 +11,7 @@
"context"
"fmt"
"internal/abi"
+ "internal/goexperiment"
"internal/profile"
"internal/syscall/unix"
"internal/testenv"
@@ -774,7 +775,11 @@
for {
go func() {
growstack1()
- c <- true
+ // NOTE(vsaioc): This goroutine may leak without this select.
+ select {
+ case c <- true:
+ case <-time.After(duration):
+ }
}()
select {
case <-t:
@@ -1565,6 +1570,228 @@
return true
}
+func goroutineLeakExample() {
+ <-make(chan struct{})
+ panic("unreachable")
+}
+
+func TestGoroutineLeakProfileConcurrency(t *testing.T) {
+ if !goexperiment.GoroutineLeakProfile {
+ // Do not run this test if the experimental flag is not enabled.
+ t.Skip("goroutine leak profile is not enabled")
+ }
+ const leakCount = 3
+
+ testenv.MustHaveParallelism(t)
+ regexLeakCount := regexp.MustCompile("goroutineleak profile: total ")
+ whiteSpace := regexp.MustCompile("\\s+")
+
+ // Regular goroutine profile. Used to check that there is no interference between
+ // the two profile types.
+ goroutineProf := Lookup("goroutine")
+ goroutineLeakProf := Lookup("goroutineleak")
+
+ // Check that a profile with debug information contains the expected leaked goroutine.
+ includesLeak := func(t *testing.T, name, s string) {
+ if !strings.Contains(s, "runtime/pprof.goroutineLeakExample") {
+ t.Errorf("%s profile does not contain expected leaked goroutine (runtime/pprof.goroutineLeakExample): %s", name, s)
+ }
+ }
+
+ checkFrame := func(i int, j int, locations []*profile.Location, expectedFunctionName string) {
+ if len(locations) <= i {
+ t.Errorf("leaked goroutine stack locations out of range at %d of %d", i+1, len(locations))
+ return
+ }
+ location := locations[i]
+ if len(location.Line) <= j {
+ t.Errorf("leaked goroutine stack location lines out of range at %d of %d", j+1, len(location.Line))
+ return
+ }
+ if location.Line[j].Function.Name != expectedFunctionName {
+ t.Errorf("leaked goroutine stack expected %s as the location[%d].Line[%d] but found %s (%s:%d)", expectedFunctionName, i, j, location.Line[j].Function.Name, location.Line[j].Function.Filename, location.Line[j].Line)
+ }
+ }
+
+ // We use this helper to count the total number of leaked goroutines in the profile.
+ //
+ // NOTE(vsaioc): This value should match the number of leaks produced in this test,
+ // but other tests could also leak goroutines, in which case we would have a mismatch
+ // when bulk-running tests.
+ //
+ // The two mismatching outcomes are therefore:
+ // - More leaks than expected, which is a correctness issue with other tests.
+ // In this case, this test effectively checks other tests for goroutine
+ // leaks when running tests in bulk (e.g., by running all.bash).
+ //
+ // - Fewer leaks than expected; this is an unfortunate symptom of scheduling
+ // non-determinism, which may occur once in a blue moon. We make
+ // a best-effort attempt to allow the expected leaks to occur, by yielding
+ // the main thread, but it is never a guarantee.
+ countLeaks := func(t *testing.T, number int, s string) {
+ // Strip the profile header
+ parts := regexLeakCount.Split(s, -1)
+ if len(parts) < 2 {
+ t.Fatalf("goroutineleak profile does not contain 'goroutineleak profile: total ': %s\nparts: %v", s, parts)
+ return
+ }
+
+ parts = whiteSpace.Split(parts[1], -1)
+
+ count, err := strconv.ParseInt(parts[0], 10, 64)
+ if err != nil {
+ t.Fatalf("goroutineleak profile count is not a number: %s\nerror: %v", s, err)
+ }
+
+ // Check that the total number of leaked goroutines is exactly the expected number.
+ if count != int64(number) {
+ t.Errorf("goroutineleak profile does not contain exactly %d leaked goroutines: %d", number, count)
+ }
+ }
+
+ checkLeakStack := func(t *testing.T) func(pc uintptr, locations []*profile.Location, _ map[string][]string) {
+ return func(pc uintptr, locations []*profile.Location, _ map[string][]string) {
+ if pc != leakCount {
+ t.Errorf("expected %d leaked goroutines with specific stack configurations, but found %d", leakCount, pc)
+ return
+ }
+ switch len(locations) {
+ case 4:
+ // We expect a receive operation. This is the typical stack.
+ checkFrame(0, 0, locations, "runtime.gopark")
+ checkFrame(1, 0, locations, "runtime.chanrecv")
+ checkFrame(2, 0, locations, "runtime.chanrecv1")
+ switch len(locations[3].Line) {
+ case 2:
+ // Running `go func() { goroutineLeakExample() }()` will produce a stack with 2 lines.
+ // The anonymous function will have the call to goroutineLeakExample inlined.
+ checkFrame(3, 1, locations, "runtime/pprof.TestGoroutineLeakProfileConcurrency.func5")
+ fallthrough
+ case 1:
+ // Running `go goroutineLeakExample()` will produce a stack with 1 line.
+ checkFrame(3, 0, locations, "runtime/pprof.goroutineLeakExample")
+ default:
+ t.Errorf("leaked goroutine stack location expected 1 or 2 lines in the 4th location but found %d", len(locations[3].Line))
+ return
+ }
+ default:
+ message := fmt.Sprintf("leaked goroutine stack expected 4 or 5 locations but found %d", len(locations))
+ for _, location := range locations {
+ for _, line := range location.Line {
+ message += fmt.Sprintf("\n%s:%d", line.Function.Name, line.Line)
+ }
+ }
+ t.Errorf("%s", message)
+ }
+ }
+ }
+ // Leak some goroutines that will feature in the goroutine leak profile
+ for i := 0; i < leakCount; i++ {
+ go goroutineLeakExample()
+ go func() {
+ // Leak another goroutine that will feature a slightly different stack.
+ // This includes the frame runtime/pprof.TestGoroutineLeakProfileConcurrency.func1.
+ goroutineLeakExample()
+ panic("unreachable")
+ }()
+ // Yield several times to allow the goroutines to leak.
+ runtime.Gosched()
+ runtime.Gosched()
+ }
+
+ // Give all goroutines a chance to leak.
+ time.Sleep(time.Second)
+
+ t.Run("profile contains leak", func(t *testing.T) {
+ var w strings.Builder
+ goroutineLeakProf.WriteTo(&w, 0)
+ parseProfile(t, []byte(w.String()), checkLeakStack(t))
+ })
+
+ t.Run("leak persists between sequential profiling runs", func(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ var w strings.Builder
+ goroutineLeakProf.WriteTo(&w, 0)
+ parseProfile(t, []byte(w.String()), checkLeakStack(t))
+ }
+ })
+
+ // Concurrent calls to the goroutine leak profiler should not trigger data races
+ // or corruption.
+ t.Run("overlapping profile requests", func(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithTimeout(ctx, time.Second)
+ defer cancel()
+
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ wg.Add(1)
+ Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
+ go func() {
+ defer wg.Done()
+ for ctx.Err() == nil {
+ var w strings.Builder
+ goroutineLeakProf.WriteTo(&w, 1)
+ // NOTE(vsaioc): We cannot always guarantee that the leak will actually be recorded in
+ // the profile when making concurrent goroutine leak requests, because the GC runs
+ // concurrently with the profiler and may reset the leaked goroutine status before
+ // a concurrent profiler has the chance to record it.
+ //
+ // However, the goroutine leak count will persist, so we check the total
+ // reported in the profile header against the expected number of leaks.
+ countLeaks(t, 2*leakCount, w.String())
+ }
+ }()
+ })
+ }
+ wg.Wait()
+ })
+
+ // Concurrent calls to the goroutine leak profiler should not trigger data races
+ // or corruption, or interfere with regular goroutine profiles.
+ t.Run("overlapping goroutine and goroutine leak profile requests", func(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithTimeout(ctx, time.Second)
+ defer cancel()
+
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ wg.Add(2)
+ Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
+ go func() {
+ defer wg.Done()
+ for ctx.Err() == nil {
+ var w strings.Builder
+ goroutineLeakProf.WriteTo(&w, 1)
+ // NOTE(vsaioc): We cannot always guarantee that the leak will
+ // actually be recorded in the profile during concurrent
+ // goroutine leak profile requests. The GC runs concurrently with
+ // the profiler and may reset the leaked goroutine status before
+ // the profiler has the chance to record the leaked stacks.
+ //
+ // However, the leaked goroutine count is not reset.
+ // Other tests are not expected to leak goroutines,
+ // so the leak count is expected to be consistent.
+ countLeaks(t, 2*leakCount, w.String())
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ for ctx.Err() == nil {
+ var w strings.Builder
+ goroutineProf.WriteTo(&w, 1)
+ // The regular goroutine profile should see the leaked
+ // goroutines. We simply check that the goroutine leak
+ // profile does not corrupt the goroutine profile state.
+ includesLeak(t, "goroutine", w.String())
+ }
+ }()
+ })
+ }
+ wg.Wait()
+ })
+}
+
func TestGoroutineProfileConcurrency(t *testing.T) {
testenv.MustHaveParallelism(t)
I spotted some possible problems with your PR:
1. You have a long 244 character line in the commit message body. Please add line breaks to long lines that should be wrapped. Lines in the commit message body should be wrapped at ~76 characters unless needed for things like URLs or tables. (Note: GitHub might render long lines as soft-wrapped, so double-check in the Gerrit commit message shown above.)
2. You usually need to reference a bug number for all but trivial or cosmetic fixes. For this repo, the format is usually 'Fixes #12345' or 'Updates #12345' at the end of the commit message. Should you have a bug reference?
Please address any problems by updating the GitHub PR.
When complete, mark this comment as 'Done' and click the [blue 'Reply' button](https://go.dev/wiki/GerritBot#i-left-a-reply-to-a-comment-in-gerrit-but-no-one-but-me-can-see-it) above. These findings are based on heuristics; if a finding does not apply, briefly reply here saying so.
To update the commit title or commit message body shown here in Gerrit, you must edit the GitHub PR title and PR description (the first comment) in the GitHub web interface using the 'Edit' button or 'Edit' menu entry there. Note: pushing a new commit to the PR will not automatically update the commit message used by Gerrit.
For more details, see:
- [how to update commit messages](https://go.dev/wiki/GerritBot/#how-does-gerritbot-determine-the-final-commit-message) for PRs imported into Gerrit.
- the Go project's [conventions for commit messages](https://go.dev/doc/contribute#commit_messages) that you should follow.
(In general for Gerrit code reviews, the change author is expected to [log in to Gerrit](https://go-review.googlesource.com/login/) with a Gmail or other Google account and then close out each piece of feedback by marking it as 'Done' if implemented as suggested or otherwise reply to each review comment. See the [Review](https://go.dev/doc/contribute#review) section of the Contributing Guide for details.)
@mkny...@google.com @mpr...@google.com @thepud...@gmail.com Leaving this comment here for visibility. I wrote a "Release Note" section draft here:
https://github.com/VladSaiocUber/goroutineleak-profile-release-notes/blob/main/README.md
Let me know what you think.
| Commit-Queue | +1 |
@mkny...@google.com @mpr...@google.com @thepud...@gmail.com Leaving this comment here for visibility. I wrote a "Release Note" section draft here:
https://github.com/VladSaiocUber/goroutineleak-profile-release-notes/blob/main/README.md
Let me know what you think.
we manage release notes for the next release in-tree. could you please send these release notes as a new CL adding your release notes as a new section to doc/next/04-runtime.md? we can use Gerrit to review from there.
if readgstatus(gp1) == _Gleaked {

how does this work if it happens to run during a GC mark phase that's performing the algorithm? because leaked goroutines are switched back to _Gwaiting, won't they be potentially excluded? IIUC, this just means more false negatives if someone happens to request at the same time as the GC is searching for leaked goroutines. this seems okay to me, but may be worth a comment.
if readgstatus(gp1) == _Gleaked {

it occurs to me that perhaps this should mask out _Gscan, so it can still read the goroutine even if it's being scanned by the GC.
Addressed both comments (good catch @mkny...@google.com on scanned leaked status). Will make another CL for the release notes.
how does this work if it happens to run during a GC mark phase that's performing the algorithm? because leaked goroutines are switched back to _Gwaiting, won't they be potentially excluded? IIUC, this just means more false negatives if someone happens to request at the same time as the GC is searching for leaked goroutines. this seems okay to me, but may be worth a comment.
Done
it occurs to me that perhaps this should mask out _Gscan, so it can still read the goroutine even if it's being scanned by the GC.
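For context, masking out the scan bit in the recording loop might look roughly like this. This is only an illustration built from the runtime's existing status constants and the loop in this CL, not the exact code of the follow-up patchset:

// Sketch: tolerate a concurrent GC stack scan when classifying leaks.
// _Gscan is OR'd into a goroutine's status while the GC scans its stack,
// so clearing that bit lets a leaked goroutine still be recognized as
// _Gleaked during the scan window.
forEachGRace(func(gp1 *g) {
	if readgstatus(gp1)&^_Gscan == _Gleaked {
		systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) })
		if labels != nil {
			labels[offset] = gp1.labels
		}
		offset++
	}
})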
| Commit-Queue | +1 |
| Code-Review | +2 |
// requests are issued on a single track. Adding synchronization between overredundant
// goroutine leak profile requests would only needlessly increase overhead.

is there a world in which we could keep the G in _Gleaked through the algorithm? not for this CL, but maybe worth mentioning as a possibility.
// requests are issued on a single track. Adding synchronization between overredundant
Done
| Commit-Queue | +1 |
// While this is a bug, normal use cases presume that goroutine leak profile
// requests are issued on a single track. Adding synchronization between
// goroutine leak profile requests would only needlessly increase overhead.

This seems like a mistake to me. We have a case that we know will generate an incomplete profile. We should avoid exposing that to users.
Why is an incomplete profile preferable to (as the simplest fix) putting a lock around profile collection to prevent the race?
// While this is a bug, normal use cases presume that goroutine leak profile
// requests are issued on a single track. Adding synchronization between
// goroutine leak profile requests would only needlessly increase overhead.

This seems like a mistake to me. We have a case that we know will generate an incomplete profile. We should avoid exposing that to users.
Why is an incomplete profile preferable to (as the simplest fix) putting a lock around profile collection to prevent the race?
Fixed with a global lock, though the critical section is long (a GC cycle + writing the profile). There is probably a more clever solution somewhere, but didn't want to rock the boat too much so close to the freeze.
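For readers following the thread, a rough sketch of the shape of that fix in runtime/pprof (the identifier names here are hypothetical, not necessarily those used in the CL):

// Sketch only: serialize goroutine leak profile requests so that one
// request's GC cycle cannot reset leaked-goroutine state while another
// request is still writing its profile.
var goroutineLeakProfileMu sync.Mutex

func writeGoroutineLeak(w io.Writer, debug int) error {
	goroutineLeakProfileMu.Lock()
	defer goroutineLeakProfileMu.Unlock()
	// Holding the lock across the whole request keeps concurrent callers
	// from observing a half-reset leak set:
	//   1. run the GC in goroutine leak detection mode,
	//   2. snapshot the leaked goroutine stacks,
	//   3. encode and write the profile to w.
	return writeGoroutineLeakProfileLocked(w, debug) // hypothetical helper
}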
| Commit-Queue | +1 |
The x/tools trybot failure here is probably a general failure due to CL 717802. (I hit this last night; see Alan's comment at the bottom of that CL).
My guess is it will probably be fixed within a few hours on the x/tools side.
// While this is a bug, normal use cases presume that goroutine leak profile
// requests are issued on a single track. Adding synchronization between
// goroutine leak profile requests would only needlessly increase overhead.

This seems like a mistake to me. We have a case that we know will generate an incomplete profile. We should avoid exposing that to users.
Why is an incomplete profile preferable to (as the simplest fix) putting a lock around profile collection to prevent the race?
Fixed with a global lock, though the critical section is long (a GC cycle + writing the profile). There is probably a more clever solution somewhere, but didn't want to rock the boat too much so close to the freeze.
the 'more clever solution' might be something like having a `_Gwaitingleaked` status. I am loathe to suggest new goroutine states, but this isn't _so_ bad because it would just be treated identically to `_Gwaiting` when the algorithm is running, and `_Gleaked` everywhere else.
// While this is a bug, normal use cases presume that goroutine leak profile
// requests are issued on a single track. Adding synchronization between
// goroutine leak profile requests would only needlessly increase overhead.

This seems like a mistake to me. We have a case that we know will generate an incomplete profile. We should avoid exposing that to users.
Why is an incomplete profile preferable to (as the simplest fix) putting a lock around profile collection to prevent the race?
Fixed with a global lock, though the critical section is long (a GC cycle + writing the profile). There is probably a more clever solution somewhere, but didn't want to rock the boat too much so close to the freeze.
the 'more clever solution' might be something like having a `_Gwaitingleaked` status. I am loathe to suggest new goroutine states, but this isn't _so_ bad because it would just be treated identically to `_Gwaiting` when the algorithm is running, and `_Gleaked` everywhere else.
I think the lock is fine for now, also.
| Code-Review | +2 |
// Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
// save the current goroutine stack, because it is obviously not leaked.
// We also do not need acquire any semaphores on goroutineProfile, because
// we don't use it for storage.

I think the comparison to goroutineProfileWithLabelsConcurrent is a bit pointless since this uses just a completely separate mechanism. I would say let's just drop the comment at this point.
| Auto-Submit | +1 |
| Code-Review | +2 |
// Unlike in goroutineProfileWithLabelsConcurrent, we don't need to
// save the current goroutine stack, because it is obviously not leaked.
// We also do not need acquire any semaphores on goroutineProfile, because
// we don't use it for storage.

I think the comparison to goroutineProfileWithLabelsConcurrent is a bit pointless since this uses just a completely separate mechanism. I would say let's just drop the comment at this point.
Acknowledged
| Code-Review | +1 |
LGTM, thanks! Just one comment about bringing back the nice comment you had.
// NOTE(vsaioc): Each goroutine leak profile request is preceded by a run of the GC
// in goroutine leak detection mode. At the beginning of the GC cycle, all previously
// reported goroutine leaks are reset to _Gwaiting. As a result, incomplete goroutine
// leak profiles may be produced if multiple goroutine leak profile requests are issued
// concurrently.
//
// Example trace:
//
// G1 | GC | G2
// ----------------------+-----------------------------+---------------------
// Request profile | . | .
// . | . | Request profile
// . | [G1] Resets leaked g status | .
// . | [G1] Leaks detected | .
// . | <New cycle> | .
// . | [G2] Resets leaked g status | .
// . | . | Write profile
// . | [G2] Leaks detected | .
// Write profile | . | .
// ----------------------+-----------------------------+---------------------
// G1 completes profile |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| G2 misses leaks

Could you add this comment back, on the sync.Mutex you've added in runtime/pprof? It is a quite nice explanation of the problem that the lock protects us from.
The current comment on the mutex is pretty vague, so I imagine future readers not understanding what it actually protects from.
| Commit-Queue | +1 |
| Code-Review | +2 |
| Code-Review | +1 |