package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/abi"
	"internal/profile"
	"internal/syscall/unix"
	"internal/testenv"
	"io"
	"iter"
	"math"
	"math/big"
	"os"
	"regexp"
	"runtime"
	"runtime/debug"
	"slices"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe"
)

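// cpuHogger runs f repeatedly until dur has elapsed, doing at least 500
// iterations so that even a very short duration produces enough work for the
// profiler to observe. Threading the accumulator through y keeps the
// compiler from optimizing the loop away.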
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	t0 := time.Now()
	accum := *y
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		accum = f(accum)
	}
	*y = accum
}

var (
	salt1 = 0
	salt2 = 0
)

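// cpuHog1 and cpuHog2 are the actual CPU hogging functions. They burn CPU in
// a tight arithmetic loop, without calling into other packages or touching
// the heap, so that profile samples land in a predictable function.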
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}

func cpuHog0(x, n int) int {
	foo := x
	for i := 0; i < n; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

func cpuHog2(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	return foo
}
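// avoidFunctions returns a list of functions that must not appear in the
// collected samples: the signal-handling helper for gccgo, nothing for gc.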
func avoidFunctions() []string {
	if runtime.Compiler == "gccgo" {
		return []string{"runtime.sigprof"}
	}
	return nil
}

func TestCPUProfile(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}

func TestCPUProfileMultithreadMagnitude(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("issue 35057 is only confirmed on Linux")
	}

	// Linux [5.9, 5.16) has a kernel bug that can break CPU timers on newly
	// created threads, breaking the test below.
	major, minor := unix.KernelVersion()
	t.Logf("Running on Linux %d.%d", major, minor)
	defer func() {
		if t.Failed() {
			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
		}
	}()

	// Disable on affected builders to avoid flakiness, but otherwise keep
	// running to ensure detection of regressions.
	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
		have59 := major > 5 || (major == 5 && minor >= 9)
		have516 := major > 5 || (major == 5 && minor >= 16)
		if have59 && !have516 {
			testenv.SkipFlaky(t, 49065)
		}
	}

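	// Compare the process's total CPU time, as reported by the OS, against
	// the total time recorded in the CPU profile. The two are measured
	// differently, so allow them to diverge by up to maxDiff (40% in short
	// mode, where far fewer samples are collected) before failing.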
	maxDiff := 0.10
	if testing.Short() {
		maxDiff = 0.40
	}

	compare := func(a, b time.Duration, maxDiff float64) error {
		if a <= 0 || b <= 0 {
			return fmt.Errorf("Expected both time reports to be positive")
		}

		if a < b {
			a, b = b, a
		}

		diff := float64(a-b) / float64(a)
		if diff > maxDiff {
			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
		}

		return nil
	}

	for _, tc := range []struct {
		name    string
		workers int
	}{
		{
			name:    "serial",
			workers: 1,
		},
		{
			name:    "parallel",
			workers: runtime.GOMAXPROCS(0),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			t.Logf("Running with %d workers", tc.workers)

			var userTime, systemTime time.Duration
			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
			acceptProfile := func(t *testing.T, p *profile.Profile) bool {
				if !matches(t, p) {
					return false
				}

				ok := true
				for i, unit := range []string{"count", "nanoseconds"} {
					if have, want := p.SampleType[i].Unit, unit; have != want {
						t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
						ok = false
					}
				}

				// cpuHog1 called below is the primary source of CPU
				// time, but there may be some background work by the
				// runtime. Since the OS rusage measurement will
				// include all of that, the profile also ought to
				// include it.
				var value time.Duration
				for _, sample := range p.Sample {
					value += time.Duration(sample.Value[1]) * time.Nanosecond
				}

				totalTime := userTime + systemTime
				t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
				if err := compare(totalTime, value, maxDiff); err != nil {
					t.Logf("compare got %v want nil", err)
					ok = false
				}

				return ok
			}

			testCPUProfile(t, acceptProfile, func(dur time.Duration) {
				userTime, systemTime = diffCPUTime(t, func() {
					var wg sync.WaitGroup
					var once sync.Once
					for i := 0; i < tc.workers; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							var salt = 0
							cpuHogger(cpuHog1, &salt, dur)
							once.Do(func() { salt1 = salt })
						}()
					}
					wg.Wait()
				})
			})
		})
	}
}
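// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.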
func containsInlinedCall(f any, maxBytes int) bool {
	_, found := findInlinedCall(f, maxBytes)
	return found
}

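// findInlinedCall returns the PC of an inlined function call within the
// function body of f, if any.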
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
	if fFunc == nil || fFunc.Entry() == 0 {
		panic("failed to locate function entry")
	}

	for offset := 0; offset < maxBytes; offset++ {
		innerPC := fFunc.Entry() + uintptr(offset)
		inner := runtime.FuncForPC(innerPC)
		if inner == nil {
			// No function known for this PC value.
			// It might simply be misaligned, so keep searching.
			continue
		}
		if inner.Entry() != fFunc.Entry() {
			// Scanned past f and didn't find any inlined functions.
			break
		}
		if inner.Name() != fFunc.Name() {
			// This PC has f as its entry-point, but is not f. Therefore, it
			// must be a function inlined into f.
			return uint64(innerPC), true
		}
	}

	return 0, false
}

func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check if inlined function locations are encoded correctly. The
	// inlinedCallee and inlinedCaller should be in one location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}

func inlinedCaller(x int) int {
	x = inlinedCallee(x, 1e5)
	return x
}

func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}

//go:noinline
func dumpCallers(pcs []uintptr) {
	if pcs == nil {
		return
	}

	skip := 2 // runtime.Callers and dumpCallers
	runtime.Callers(skip, pcs)
}

//go:noinline
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}

func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}

type inlineWrapperInterface interface {
	dump(stack []uintptr)
}

type inlineWrapper struct {
}

func (h inlineWrapper) dump(pcs []uintptr) {
	dumpCallers(pcs)
}

func inlinedWrapperCallerDump(pcs []uintptr) {
	var h inlineWrapperInterface
	h = &inlineWrapper{}
	h.dump(pcs)
}

func TestCPUProfileRecursion(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// Check that each Location mentions the recursive functions at most once.
	for i, loc := range p.Location {
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}

func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}

func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}

func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}

func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}

func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called each time, we only care about the last. We can't
	// make this conditional or this function won't be inlined.
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}

func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
	return p
}

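// testCPUProfile runs f under the CPU profiler, checking for some conditions
// specified by matches, as interpreted by profileOk. It retries with a longer
// duration for as long as the test deadline allows.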
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
		if err != nil {
			t.Fatal(err)
		}
		vers := string(out)
		t.Logf("uname -a: %v", vers)
	case "plan9":
		t.Skip("skipping on plan9")
	case "wasip1":
		t.Skip("skipping on wasip1")
	}

	broken := testenv.CPUProfilingBroken()

	deadline, ok := t.Deadline()
	if broken || !ok {
		if broken && testing.Short() {
			// If it's expected to be broken, no point waiting around.
			deadline = time.Now().Add(1 * time.Second)
		} else {
			deadline = time.Now().Add(10 * time.Second)
		}
	}

	// If we're running a long test, start with a long duration for tests
	// that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a loaded system,
	// such as when this test is running with other tests on a busy builder.
	// So we retry a few times, but increase the duration each time to reduce
	// the chance of a flake.
	for {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, &prof, duration); ok {
			return p
		}

		duration *= 2
		if time.Until(deadline) < duration {
			break
		}
		t.Logf("retrying with %s duration", duration)
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator,
	// QEMU is not perfect at emulating everything.
	// IN_QEMU environmental variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}

var diffCPUTimeImpl func(f func()) (user, system time.Duration)

func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
	if fn := diffCPUTimeImpl; fn != nil {
		return fn(f)
	}
	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
	return 0, 0
}

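// stackContains matches if a function named spec appears anywhere in the
// stack trace.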
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, loc := range stk {
		for _, line := range loc.Line {
			if strings.Contains(line.Function.Name, spec) {
				return true
			}
		}
	}
	return false
}

type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool

func profileOk(t *testing.T, matches profileMatchFunc, prof *bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	var samples uintptr
	var buf strings.Builder
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		fmt.Fprintf(&buf, " labels: %v\n", labels)
		samples += count
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to timer granularity.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if matches != nil && !matches(t, p) {
		ok = false
	}

	return p, ok
}

type profileMatchFunc func(*testing.T, *profile.Profile) bool

func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
	return func(t *testing.T, p *profile.Profile) (ok bool) {
		ok = true

		// Check that profile is well formed, contains 'need', and does not
		// contain anything from 'avoid'.
		have := make([]uintptr, len(need))
		avoidSamples := make([]uintptr, len(avoid))

		for _, sample := range p.Sample {
			count := uintptr(sample.Value[0])
			for i, spec := range need {
				if matches(spec, count, sample.Location, sample.Label) {
					have[i] += count
				}
			}
			for i, name := range avoid {
				for _, loc := range sample.Location {
					for _, line := range loc.Line {
						if strings.Contains(line.Function.Name, name) {
							avoidSamples[i] += count
						}
					}
				}
			}
		}

		for i, name := range avoid {
			bad := avoidSamples[i]
			if bad != 0 {
				t.Logf("found %d samples in avoid-function %s\n", bad, name)
				ok = false
			}
		}

		if len(need) == 0 {
			return
		}

		var total uintptr
		for i, name := range need {
			total += have[i]
			t.Logf("found %d samples in expected function %s\n", have[i], name)
		}
		if total == 0 {
			t.Logf("no samples in expected functions")
			ok = false
		}

		// We'd like to check a reasonable minimum, like
		// total / len(have) / smallconstant, but this test is
		// pretty flaky (see bug 7095). So we'll just test to
		// make sure we got at least one sample.
		min := uintptr(1)
		for i, name := range need {
			if have[i] < min {
				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
				ok = false
			}
		}
		return
	}
}
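// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.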
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	exe, err := os.Executable()
	if err != nil {
		t.Fatal(err)
	}

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		testenv.Command(t, exe, "-h").CombinedOutput()
	}
}
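// Test that profiler does not observe runtime.gogo as "user" goroutine
// execution. If it did, it would see inconsistent state and would either
// record an incorrect stack or crash because the stack was malformed.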
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 seconds
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for gogo with an attempt
		// at a traceback. "runtime.gogo" is OK, because that's the
		// part of the context switch before the actual switch begins.
		// But we should not see a profile entry for the assembly
		// routine that performs the switch itself.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// An entry with just one frame is OK too:
			// it knew to stop at gogo.
			if len(stk) == 1 {
				return
			}

			// Otherwise, should not see gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "gogo" {
				var buf strings.Builder
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
			}
		})
	}
}

func fprintStack(w io.Writer, stk []*profile.Location) {
	if len(stk) == 0 {
		fmt.Fprintf(w, " (stack empty)")
	}
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
}
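// Test that profiling of division operations is okay, especially on ARM. See issue 6681.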
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

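// stackContainsAll matches if all functions in spec (comma-separated) appear
// somewhere in the stack trace.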
func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, f := range strings.Split(spec, ",") {
		if !stackContains(f, count, stk, labels) {
			return false
		}
	}
	return true
}

func TestMorestack(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
	testCPUProfile(t, matches, func(duration time.Duration) {
		t := time.After(duration)
		c := make(chan bool)
		for {
			go func() {
				growstack1()
				c <- true
			}()
			select {
			case <-t:
				return
			case <-c:
			}
		}
	})
}

//go:noinline
func growstack1() {
	growstack(10)
}

//go:noinline
func growstack(n int) {
	var buf [8 << 18]byte
	use(buf)
	if n > 0 {
		growstack(n - 1)
	}
}

//go:noinline
func use(x [8 << 18]byte) {}
func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func(*testing.T)
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f(t)
	}

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := profileStacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})
}

func profileStacks(p *profile.Profile) (res [][]string) {
	for _, s := range p.Sample {
		var stk []string
		for _, l := range s.Location {
			for _, line := range l.Line {
				stk = append(stk, line.Function.Name)
			}
		}
		res = append(res, stk)
	}
	return res
}

func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
	for _, record := range records {
		frames := runtime.CallersFrames(record.Stack())
		var stk []string
		for {
			frame, more := frames.Next()
			stk = append(stk, frame.Function)
			if !more {
				break
			}
		}
		res = append(res, stk)
	}
	return res
}

func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		for i, f := range want {
			if f != stk[i] {
				break
			}
			if i == len(want)-1 {
				return true
			}
		}
	}
	return false
}
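// awaitBlockedGoroutine polls the full goroutine dump until at least count
// goroutines are blocked in the given wait state inside a runtime/pprof
// function matching fName.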
func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
	r := regexp.MustCompile(re)

	if deadline, ok := t.Deadline(); ok {
		if d := time.Until(deadline); d > 1*time.Second {
			timer := time.AfterFunc(d-1*time.Second, func() {
				debug.SetTraceback("all")
				panic(fmt.Sprintf("timed out waiting for %#q", re))
			})
			defer timer.Stop()
		}
	}

	buf := make([]byte, 64<<10)
	for {
		runtime.Gosched()
		n := runtime.Stack(buf, true)
		if n == len(buf) {
			// Buffer wasn't large enough for a full goroutine dump.
			// Resize it and try again.
			buf = make([]byte, 2*len(buf))
			continue
		}
		if len(r.FindAll(buf[:n], -1)) >= count {
			return
		}
	}
}

func blockChanRecv(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
		c <- true
	}()
	<-c
}

func blockChanSend(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
		<-c
	}()
	c <- true
}

func blockChanClose(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync(t *testing.T) {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync(t *testing.T) {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex(t *testing.T) {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

func blockMutexN(t *testing.T, n int, d time.Duration) {
	var wg sync.WaitGroup
	var mu sync.Mutex
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
		time.Sleep(d)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			mu.Unlock()
		}()
	}
	wg.Wait()
}

func blockCond(t *testing.T) {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}
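// TestBlockProfileBias verifies that block profile sampling does not bias
// against frequent short events in favor of infrequent long events: both
// workloads below block for the same total duration and should report roughly
// equal delay after sampling is scaled.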
func TestBlockProfileBias(t *testing.T) {
	rate := int(1000) // arbitrary value known to cause bias
	runtime.SetBlockProfileRate(rate)
	defer runtime.SetBlockProfileRate(0)

	// simulate blocking events
	blockFrequentShort(rate)
	blockInfrequentLong(rate)

	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Fatalf("failed to parse profile: %v", err)
	}
	t.Logf("parsed proto: %s", p)

	il := float64(-1) // blockInfrequentLong duration
	fs := float64(-1) // blockFrequentShort duration
	for _, s := range p.Sample {
		for _, l := range s.Location {
			for _, line := range l.Line {
				if len(s.Value) < 2 {
					t.Fatal("block profile has less than 2 sample types")
				}

				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
					il = float64(s.Value[1])
				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
					fs = float64(s.Value[1])
				}
			}
		}
	}
	if il == -1 || fs == -1 {
		t.Fatal("block profile is missing expected functions")
	}

	// The two totals should be within threshold of each other.
	const threshold = 0.2
	if bias := (il - fs) / il; math.Abs(bias) > threshold {
		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
	} else {
		t.Logf("bias: abs(%f) < %f", bias, threshold)
	}
}
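// blockFrequentShort produces 100000 block events with an average duration of
// rate / 10.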
func blockFrequentShort(rate int) {
	for i := 0; i < 100000; i++ {
		blockevent(int64(rate/10), 1)
	}
}

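// blockInfrequentLong produces 10000 block events with an average duration of
// rate.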
func blockInfrequentLong(rate int) {
	for i := 0; i < 10000; i++ {
		blockevent(int64(rate), 1)
	}
}

// blockevent is linked to the runtime's block-event recording hook (this is
// why the blank "unsafe" import above is needed) so the bias test can
// synthesize block events directly.
//
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)

func TestMutexProfile(t *testing.T) {
	// Generate mutex profile

	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	const (
		N = 100
		D = 100 * time.Millisecond
	)
	start := time.Now()
	blockMutexN(t, N, D)
	blockMutexNTime := time.Since(start)

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		if len(lines) < 6 {
			t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		r3 := "^#.*runtime/pprof.blockMutex.*$"
		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[5], r3)
		}
		t.Log(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := profileStacks(p)
		for _, want := range [][]string{
			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}

		i := 0
		for ; i < len(p.SampleType); i++ {
			if p.SampleType[i].Unit == "nanoseconds" {
				break
			}
		}
		if i >= len(p.SampleType) {
			t.Fatalf("profile did not contain nanoseconds sample")
		}
		total := int64(0)
		for _, s := range p.Sample {
			total += s.Value[i]
		}

		// The samples should cover N goroutines each blocked for about D:
		// at least 90% of N*D in total, and no more than 110% of N times
		// the wall-clock time the whole blockMutexN call took.
		d := time.Duration(total)
		lo := time.Duration(N * D * 9 / 10)
		hi := time.Duration(N) * blockMutexNTime * 11 / 10
		if d < lo || d > hi {
			for _, s := range p.Sample {
				t.Logf("sample: %s", time.Duration(s.Value[i]))
			}
			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
		}
	})

	t.Run("records", func(t *testing.T) {
		// Record the mutex profile via the runtime.MutexProfile API.
		var records []runtime.BlockProfileRecord
		for {
			n, ok := runtime.MutexProfile(records)
			if ok {
				records = records[:n]
				break
			}
			records = make([]runtime.BlockProfileRecord, n*2)
		}

		// Check that we see the same stack trace as the proto profile. For
		// historical reasons we expect a runtime.goexit root frame here that
		// is omitted in the proto profile.
		stks := blockRecordStacks(records)
		want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
		if !containsStack(stks, want) {
			t.Errorf("No matching stack entry for %+v", want)
		}
	})
}

func TestMutexProfileRateAdjust(t *testing.T) {
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	readProfile := func() (contentions int64, delay int64) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		for _, s := range p.Sample {
			var match, runtimeInternal bool
			for _, l := range s.Location {
				for _, line := range l.Line {
					if line.Function.Name == "runtime/pprof.blockMutex.func1" {
						match = true
					}
					if line.Function.Name == "runtime.unlock" {
						runtimeInternal = true
					}
				}
			}
			if match && !runtimeInternal {
				contentions += s.Value[0]
				delay += s.Value[1]
			}
		}
		return
	}

	blockMutex(t)
	contentions, delay := readProfile()
	if contentions == 0 {
		t.Fatal("did not see expected function in profile")
	}
	runtime.SetMutexProfileFraction(0)
	newContentions, newDelay := readProfile()
	if newContentions != contentions || newDelay != delay {
		t.Fatalf("sample value changed: got [%d, %d], want [%d, %d]", newContentions, newDelay, contentions, delay)
	}
}

func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// same one OS thread, keeping the profile output deterministic.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}
	ctx := context.Background()

	// ... and again, with labels this time (just with fewer iterations to keep
	// sorting deterministic).
	Do(ctx, Labels("label", "value"), func(context.Context) {
		for i := 0; i < 89; i++ {
			switch {
			case i%10 == 0:
				go func1(c)
			case i%2 == 0:
				go func2(c)
			default:
				go func3(c)
			}
			// Let goroutines block on channel
			for j := 0; j < 5; j++ {
				runtime.Gosched()
			}
		}
	})

	SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
	defer SetGoroutineLabels(context.Background())

	garbage := new(*int)
	fingReady := make(chan struct{})
	runtime.SetFinalizer(garbage, func(v **int) {
		Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
			close(fingReady)
			<-c
		})
	})
	garbage = nil
	for i := 0; i < 2; i++ {
		runtime.GC()
	}
	<-fingReady

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	labels := labelMap{Labels("label", "value")}
	labelStr := "\n# labels: " + labels.String()
	selfLabel := labelMap{Labels("self-label", "self-value")}
	selfLabelStr := "\n# labels: " + selfLabel.String()
	fingLabel := labelMap{Labels("fing-label", "fing-value")}
	fingLabelStr := "\n# labels: " + fingLabel.String()
	orderedPrefix := []string{
		"\n50 @ ",
		"\n44 @", labelStr,
		"\n40 @",
		"\n36 @", labelStr,
		"\n10 @",
		"\n9 @", labelStr,
		"\n1 @"}
	if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}
	if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	expectedLabels := map[int64]map[string]string{
		50: {},
		44: {"label": "value"},
		40: {},
		36: {"label": "value"},
		10: {},
		9:  {"label": "value"},
		1:  {"self-label": "self-value", "fing-label": "fing-value"},
	}
	if !containsCountsLabels(p, expectedLabels) {
		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
			expectedLabels, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}

func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		var ok bool
		if _, s, ok = strings.Cut(s, t); !ok {
			return false
		}
	}
	return true
}

func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
	m := make(map[int64]int)
	type nkey struct {
		count    int64
		key, val string
	}
	n := make(map[nkey]int)
	for c, kv := range countLabels {
		m[c]++
		for k, v := range kv {
			n[nkey{
				count: c,
				key:   k,
				val:   v,
			}]++
		}
	}
	for _, s := range prof.Sample {
		// A goroutine profile sample carries a single count value.
		if len(s.Value) != 1 {
			return false
		}
		m[s.Value[0]]--
		for k, vs := range s.Label {
			for _, v := range vs {
				n[nkey{
					count: s.Value[0],
					key:   k,
					val:   v,
				}]--
			}
		}
	}
	for _, n := range m {
		if n > 0 {
			return false
		}
	}
	for _, ncnt := range n {
		if ncnt != 0 {
			return false
		}
	}
	return true
}

func TestGoroutineProfileConcurrency(t *testing.T) {
	testenv.MustHaveParallelism(t)

	goroutineProf := Lookup("goroutine")

	profilerCalls := func(s string) int {
		return strings.Count(s, "\truntime/pprof.runtime_goroutineProfileWithLabels+")
	}

	includesFinalizerOrCleanup := func(s string) bool {
		return strings.Contains(s, "runtime.runFinalizers") || strings.Contains(s, "runtime.runCleanups")
	}

	// Concurrent calls to the goroutine profiler should not trigger data races
	// or corruption.
	t.Run("overlapping profile requests", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()

		var wg sync.WaitGroup
		for i := 0; i < 2; i++ {
			wg.Add(1)
			Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
				go func() {
					defer wg.Done()
					for ctx.Err() == nil {
						var w strings.Builder
						goroutineProf.WriteTo(&w, 1)
						prof := w.String()
						count := profilerCalls(prof)
						if count >= 2 {
							t.Logf("prof %d\n%s", count, prof)
							cancel()
						}
					}
				}()
			})
		}
		wg.Wait()
	})

	// The finalizer goroutine should not show up in most profiles, since it's
	// marked as a system goroutine when idle.
	t.Run("finalizer not present", func(t *testing.T) {
		var w strings.Builder
		goroutineProf.WriteTo(&w, 1)
		prof := w.String()
		if includesFinalizerOrCleanup(prof) {
			t.Errorf("profile includes finalizer or cleanup (but should be marked as system):\n%s", prof)
		}
	})

	// The finalizer goroutine should show up when it's running user code.
	t.Run("finalizer present", func(t *testing.T) {
		// T is a pointer type so it won't be allocated by the tiny
		// allocator, which can lead to its finalizer not being called
		// during this test.
		type T *byte
		obj := new(T)
		ch1, ch2 := make(chan int), make(chan int)
		defer close(ch2)
		runtime.SetFinalizer(obj, func(_ interface{}) {
			close(ch1)
			<-ch2
		})
		obj = nil
		for i := 10; i >= 0; i-- {
			select {
			case <-ch1:
			default:
				if i == 0 {
					t.Fatalf("finalizer did not run")
				}
				runtime.GC()
			}
		}
		var w strings.Builder
		goroutineProf.WriteTo(&w, 1)
		prof := w.String()
		if !includesFinalizerOrCleanup(prof) {
			t.Errorf("profile does not include finalizer (and it should be marked as user):\n%s", prof)
		}
	})

	// Check that new goroutines only show up in order.
	testLaunches := func(t *testing.T) {
		var done sync.WaitGroup
		defer done.Wait()

		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		ch := make(chan int)
		defer close(ch)

		var ready sync.WaitGroup

		// Launch goroutines in a numbered sequence; each parks on ch and
		// survives until the subtest ends, so the profiles taken below can
		// check that the launch order is preserved.
		ready.Add(1)
		done.Add(1)
		go func() {
			defer done.Done()
			for i := 0; ctx.Err() == nil; i++ {
				// Use labels to keep track of the launch order of the
				// goroutines we spawn.
				SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-loop-i", fmt.Sprint(i))))
				done.Add(1)
				go func() {
					<-ch
					done.Done()
				}()
				for j := 0; j < i; j++ {
					runtime.Gosched()
				}
				if i == 0 {
					ready.Done()
				}
			}
		}()

		// Create goroutine churn in the background so the profiles are
		// collected while goroutines are starting and exiting.
		ready.Add(1)
		var churn func(i int)
		churn = func(i int) {
			SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i))))
			if i == 0 {
				ready.Done()
			} else if i%16 == 0 {
				// Yield on occasion so this sequence of goroutine launches
				// doesn't monopolize a P. See issue #52934.
				runtime.Gosched()
			}
			if ctx.Err() == nil {
				go churn(i + 1)
			}
		}
		go func() {
			churn(0)
		}()

		ready.Wait()

		var w [3]bytes.Buffer
		for i := range w {
			goroutineProf.WriteTo(&w[i], 0)
		}
		for i := range w {
			p, err := profile.Parse(bytes.NewReader(w[i].Bytes()))
			if err != nil {
				t.Errorf("error parsing protobuf profile: %v", err)
			}

			// Each launch-order label should appear exactly once, except
			// possibly the most recent launch, which may still be starting
			// when the profile is taken.
			counts := make(map[string]int)
			for _, s := range p.Sample {
				label := s.Label[t.Name()+"-loop-i"]
				if len(label) > 0 {
					counts[label[0]]++
				}
			}
			for j, max := 0, len(counts)-1; j <= max; j++ {
				n := counts[fmt.Sprint(j)]
				if n == 1 || (n == 2 && j == max) {
					continue
				}
				t.Errorf("profile #%d's goroutines with label loop-i:%d; %d != 1 (or 2 for the last entry, %d)",
					i+1, j, n, max)
				t.Logf("counts %v", counts)
				break
			}
		}
	}

	runs := 100
	if testing.Short() {
		runs = 5
	}
	for i := 0; i < runs; i++ {
		// Run multiple times to shake out data races.
		t.Run("goroutine launches", testLaunches)
	}
}
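// TestGoroutineProfileCoro checks that taking a goroutine profile while
// goroutines are rapidly switching between coroutines (as created by
// iter.Pull) does not crash or corrupt the profile.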
func TestGoroutineProfileCoro(t *testing.T) {
	testenv.MustHaveParallelism(t)

	goroutineProf := Lookup("goroutine")

	// Set up a goroutine to just create and run coroutine goroutines all day.
	iterFunc := func() {
		p, stop := iter.Pull2(
			func(yield func(int, int) bool) {
				for i := 0; i < 10000; i++ {
					if !yield(i, i) {
						return
					}
				}
			},
		)
		defer stop()
		for {
			_, _, ok := p()
			if !ok {
				break
			}
		}
	}
	var wg sync.WaitGroup
	done := make(chan struct{})
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			iterFunc()
			select {
			case <-done:
			default:
			}
		}
	}()

	// Take a goroutine profile while the coroutine churn is in flight; the
	// profiler must tolerate goroutines switching between coroutines.
	goroutineProf.WriteTo(io.Discard, 1)
}

// Regression test for go.dev/issue/74090: taking a goroutine profile while
// the finalizer goroutine flips between its idle (system) and running (user)
// states must not produce a corrupt profile.
func TestGoroutineProfileIssue74090(t *testing.T) {
	testenv.MustHaveParallelism(t)

	goroutineProf := Lookup("goroutine")

	// T is a pointer type so it won't be allocated by the tiny allocator,
	// which can lead to its finalizer not being called during this test.
	type T *byte
	for range 10 {
		// Allocate a batch of objects with trivial finalizers so the
		// finalizer goroutine repeatedly wakes up, runs, and parks again,
		// alternating between being treated as a system goroutine and as a
		// user goroutine.
		var objs []*T
		for range 10000 {
			obj := new(T)
			runtime.SetFinalizer(obj, func(_ interface{}) {})
			objs = append(objs, obj)
		}
		objs = nil

		// Queue the finalizers for execution.
		runtime.GC()

		// Write a goroutine profile while the finalizer goroutine is
		// churning through its queue.
		var w strings.Builder
		goroutineProf.WriteTo(&w, 1)
	}
}

func BenchmarkGoroutine(b *testing.B) {
	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
		return func(b *testing.B) {
			c := make(chan int)
			var ready, done sync.WaitGroup
			defer func() {
				close(c)
				done.Wait()
			}()

			for i := 0; i < n; i++ {
				ready.Add(1)
				done.Add(1)
				go func() {
					ready.Done()
					<-c
					done.Done()
				}()
			}

			ready.Wait()
			for i := 0; i < 5; i++ {
				runtime.Gosched()
			}

			fn(b)
		}
	}

	withChurn := func(fn func(b *testing.B)) func(b *testing.B) {
		return func(b *testing.B) {
			ctx := context.Background()
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			var ready sync.WaitGroup
			ready.Add(1)
			var count int64
			var churn func(i int)
			churn = func(i int) {
				SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
				atomic.AddInt64(&count, 1)
				if i == 0 {
					ready.Done()
				}
				if ctx.Err() == nil {
					go churn(i + 1)
				}
			}
			go func() {
				churn(0)
			}()
			ready.Wait()

			fn(b)
			b.ReportMetric(float64(atomic.LoadInt64(&count))/float64(b.N), "concurrent_launches/op")
		}
	}

	benchWriteTo := func(b *testing.B) {
		goroutineProf := Lookup("goroutine")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			goroutineProf.WriteTo(io.Discard, 0)
		}
		b.StopTimer()
	}

	benchGoroutineProfile := func(b *testing.B) {
		p := make([]runtime.StackRecord, 10000)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			runtime.GoroutineProfile(p)
		}
		b.StopTimer()
	}

	// Note that some costs of collecting a goroutine profile depend on the
	// length of the runtime.allgs slice, which never shrinks. Stay within the
	// race detector's 8k-goroutine limit.
	for _, n := range []int{50, 500, 5000} {
		b.Run(fmt.Sprintf("Profile.WriteTo idle %d", n), withIdle(n, benchWriteTo))
		b.Run(fmt.Sprintf("Profile.WriteTo churn %d", n), withIdle(n, withChurn(benchWriteTo)))
		b.Run(fmt.Sprintf("runtime.GoroutineProfile churn %d", n), withIdle(n, withChurn(benchGoroutineProfile)))
	}
}

var emptyCallStackTestRun int64

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
	emptyCallStackTestRun++

	t.Parallel()
	var buf strings.Builder
	p := NewProfile(name)

	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := name + " profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}
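// stackContainsLabeled takes a spec like funcname;key=value and matches if
// the stack has that key and value and has funcname (as in stackContains).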
func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	base, kv, ok := strings.Cut(spec, ";")
	if !ok {
		panic("no semicolon in key/value spec")
	}
	k, v, ok := strings.Cut(kv, "=")
	if !ok {
		panic("missing = in key/value spec")
	}
	if !slices.Contains(labels[k], v) {
		return false
	}
	return stackContains(base, count, stk, labels)
}

func TestCPUProfileLabel(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
	})
}

func TestLabelRace(t *testing.T) {
	testenv.MustHaveParallelism(t)
	// Test the race detector annotations for synchronization
	// between setting labels and consuming them from the
	// profile.
	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
	testCPUProfile(t, matches, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		for time.Since(start) < dur {
			var salts [10]int
			for i := 0; i < 10; i++ {
				wg.Add(1)
				go func(j int) {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
					})
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
	})
}

func TestGoroutineProfileLabelRace(t *testing.T) {
	testenv.MustHaveParallelism(t)
	// Test the race detector annotations for synchronization
	// between taking a goroutine profile and modifying the
	// goroutine labels it reports.
	t.Run("reset", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		go func() {
			goroutineProf := Lookup("goroutine")
			for ctx.Err() == nil {
				var w strings.Builder
				goroutineProf.WriteTo(&w, 1)
				prof := w.String()
				if strings.Contains(prof, "loop-i") {
					cancel()
				}
			}
		}()

		for i := 0; ctx.Err() == nil; i++ {
			Do(ctx, Labels("loop-i", fmt.Sprint(i)), func(ctx context.Context) {
			})
		}
	})

	t.Run("churn", func(t *testing.T) {
		ctx := context.Background()
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		var ready sync.WaitGroup
		ready.Add(1)
		var churn func(i int)
		churn = func(i int) {
			SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
			if i == 0 {
				ready.Done()
			}
			if ctx.Err() == nil {
				go churn(i + 1)
			}
		}
		go func() {
			churn(0)
		}()
		ready.Wait()

		goroutineProf := Lookup("goroutine")
		for i := 0; i < 10; i++ {
			goroutineProf.WriteTo(io.Discard, 1)
		}
	})
}
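// TestLabelSystemstack makes sure CPU profiler samples of goroutines running
// on the system stack include the correct pprof labels. See issue #48577.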
func TestLabelSystemstack(t *testing.T) {
	// Grab and re-set the initial value before continuing to ensure
	// GOGC doesn't actually change following the test.
	gogc := debug.SetGCPercent(100)
	debug.SetGCPercent(gogc)

	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
			parallelLabelHog(ctx, dur, gogc)
		})
	})

	// Two conditions to check:
	// mustBeLabeled: samples that ran under Do must carry the label.
	// mustNotBeLabeled: dedicated runtime goroutines must never be labeled.
	for _, s := range p.Sample {
		isLabeled := s.Label != nil && slices.Contains(s.Label["key"], "value")
		var (
			mayBeLabeled     bool
			mustBeLabeled    string
			mustNotBeLabeled string
		)
		for _, loc := range s.Location {
			for _, l := range loc.Line {
				switch l.Function.Name {
				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
					mustBeLabeled = l.Function.Name
				case "runtime/pprof.Do":
					// Do sets the labels, so samples may
					// or may not be labeled depending on
					// which part of the function they are
					// at.
					mayBeLabeled = true
				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runFinalizers", "runtime.runCleanups", "runtime.sysmon":
					// Runtime system goroutines or threads
					// (such as those identified by
					// runtime.isSystemGoroutine). These
					// should never be labeled.
					mustNotBeLabeled = l.Function.Name
				case "gogo", "gosave_systemstack_switch", "racecall":
					// These are context switch/race
					// critical that we can't do a full
					// traceback from. Typically this would
					// be covered by the runtime check
					// below, but these symbols don't have
					// the package name.
					mayBeLabeled = true
				}

				if strings.HasPrefix(l.Function.Name, "runtime.") {
					// There are many places in the runtime
					// where we can't do a full traceback.
					// Ideally we'd list them all, but
					// barring that, allow anything in the
					// runtime, unless explicitly excluded
					// above.
					mayBeLabeled = true
				}
			}
		}
		errorStack := func(f string, args ...any) {
			var buf strings.Builder
			fprintStack(&buf, s.Location)
			t.Errorf("%s: %s", fmt.Sprintf(f, args...), buf.String())
		}
		if mustBeLabeled != "" && mustNotBeLabeled != "" {
			errorStack("sample contains both %s, which must be labeled, and %s, which must not be labeled", mustBeLabeled, mustNotBeLabeled)
			continue
		}
		if mustBeLabeled != "" || mustNotBeLabeled != "" {
			// We found a definitive frame, so mayBeLabeled no longer matters.
			mayBeLabeled = false
		}
		if mayBeLabeled {
			// This sample may or may not be labeled, so there's nothing we can check.
			continue
		}
		if mustBeLabeled != "" && !isLabeled {
			errorStack("sample must be labeled because of %s, but is not", mustBeLabeled)
		}
		if mustNotBeLabeled != "" && isLabeled {
			errorStack("sample must not be labeled because of %s, but is", mustNotBeLabeled)
		}
	}
}
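// labelHog is designed to burn CPU time in a way that a high number of CPU
// samples are gathered on the systemstack.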
func labelHog(stop chan struct{}, gogc int) {
	// Flush any in-progress GC first so the SetGCPercent calls below do a
	// consistent amount of work on the system stack.
	runtime.GC()

	for i := 0; ; i++ {
		select {
		case <-stop:
			return
		default:
			debug.SetGCPercent(gogc)
		}
	}
}

// parallelLabelHog runs labelHog concurrently on every available P for dur.
func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
	var wg sync.WaitGroup
	stop := make(chan struct{})
	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			labelHog(stop, gogc)
		}()
	}

	time.Sleep(dur)
	close(stop)
	wg.Wait()
}
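// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics code on ARM. The test loads and stores with a moderate
// frequency while the CPU profiler runs. See issue 20146.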
func TestAtomicLoadStore64(t *testing.T) {
	f, err := os.CreateTemp("", "profatomic")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	var flag uint64
	done := make(chan bool, 1)

	go func() {
		for atomic.LoadUint64(&flag) == 0 {
			runtime.Gosched()
		}
		done <- true
	}()
	time.Sleep(50 * time.Millisecond)
	atomic.StoreUint64(&flag, 1)
	<-done
}

func TestTracebackAll(t *testing.T) {
	// With gccgo, if a profiling signal arrives at the wrong time
	// during traceback, it may crash or hang. See issue 29448.
	f, err := os.CreateTemp("", "proftraceback")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	ch := make(chan int)
	defer close(ch)

	count := 10
	for i := 0; i < count; i++ {
		go func() {
			<-ch // block
		}()
	}

	N := 10000
	if testing.Short() {
		N = 500
	}
	buf := make([]byte, 10*1024)
	for i := 0; i < N; i++ {
		runtime.Stack(buf, true)
	}
}
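// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current go compilers may not always inline functions
// involved in recursion but that may not be true in the future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// utilizing translateCPUProfile defined in proto_test.go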
2260 func TestTryAdd(t *testing.T) {
2261 if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
2262 t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
2263 }
2264
2265
2266
2267 pcs := make([]uintptr, 2)
2268 inlinedCallerDump(pcs)
2269 inlinedCallerStack := make([]uint64, 2)
2270 for i := range pcs {
2271 inlinedCallerStack[i] = uint64(pcs[i])
2272 }
2273 wrapperPCs := make([]uintptr, 1)
2274 inlinedWrapperCallerDump(wrapperPCs)
2275
2276 if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
2277 t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
2278 }
2279
2280
2281
2282
2283
2284
2285
2286 pcs = make([]uintptr, 6)
2287 recursionChainTop(1, pcs)
2288 recursionStack := make([]uint64, len(pcs))
2289 for i := range pcs {
2290 recursionStack[i] = uint64(pcs[i])
2291 }
2292
2293 period := int64(2000 * 1000)
2294
2295 testCases := []struct {
2296 name string
2297 input []uint64
2298 count int
2299 wantLocs [][]string
2300 wantSamples []*profile.Sample
2301 }{{
2302
2303 name: "full_stack_trace",
2304 input: []uint64{
2305 3, 0, 500,
2306 5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
2307 },
2308 count: 2,
2309 wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug35538",
		input: []uint64{
			3, 0, 500,
			// Fake frame: tryAdd will have inlinedCallerDump
			// (stack[1]) on the deck when it encounters the next
			// inline function. It should accept this.
			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug38096",
		input: []uint64{
			3, 0, 500,
			// count (data[2]) == 0 && len(stk) == 1 is an overflow
			// record. The "stk" entry is actually the count.
			4, 0, 0, 4242,
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// If a function is directly called recursively then it must
		// not be inlined in the caller.
		//
		// N.B. We're generating an impossible profile here, with a
		// recursive inlinedCalleeDump call. This is simulating a non-Go
		// function that looks like an inlined Go function other than
		// its recursive property. See pcDeck.tryAdd.
		name: "directly_recursive_func_is_not_inlined",
		input: []uint64{
			3, 0, 500,
			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
			4, 0, 40, inlinedCallerStack[0],
		},
		count: 3,
		// inlinedCallerDump shows up here because
		// runtime_expandFinalInlineFrame adds it to the stack frame.
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
		},
	}, {
		name: "recursion_chain_inline",
		input: []uint64{
			3, 0, 500,
			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
		},
		count: 2,
		wantLocs: [][]string{
			{"runtime/pprof.recursionChainBottom"},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.recursionChainBottom",
			},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.TestTryAdd",
			},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
		},
	}, {
		name: "truncated_stack_trace_later",
		input: []uint64{
			3, 0, 500,
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
			4, 0, 60, inlinedCallerStack[0],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_first",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		count:    3,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// We can recover the inlined caller from a truncated stack.
		name: "truncated_stack_trace_only",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// The same location is used for duplicated stacks.
		name: "truncated_stack_trace_twice",
		input: []uint64{
			3, 0, 500,
			4, 0, 70, inlinedCallerStack[0],
			// Fake frame: add a fake call to
			// inlinedCallerDump to prevent this sample
			// from getting merged into above.
			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
		},
		count: 3,
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
			{"runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
		},
	}, {
		name: "expand_wrapper_function",
		input: []uint64{
			3, 0, 500,
			4, 0, 50, uint64(wrapperPCs[0]),
		},
		count:    2,
		wantLocs: [][]string{{"runtime/pprof.inlineWrapper.dump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, err := translateCPUProfile(tc.input, tc.count)
			if err != nil {
				t.Fatalf("translating profile: %v", err)
			}
			t.Logf("Profile: %v\n", p)

			// Collect the function names for each location to compare
			// against wantLocs.
			var gotLoc [][]string
			for _, loc := range p.Location {
				var names []string
				for _, line := range loc.Line {
					names = append(names, line.Function.Name)
				}
				gotLoc = append(gotLoc, names)
			}
			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
			}

			var gotSamples []*profile.Sample
			for _, sample := range p.Sample {
				var locs []*profile.Location
				for _, loc := range sample.Location {
					locs = append(locs, &profile.Location{ID: loc.ID})
				}
				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
			}
			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
			}
		})
	}
}

func TestTimeVDSO(t *testing.T) {
	// Test that time functions have the right stack trace. In particular,
	// it shouldn't be recursive.

	if runtime.GOOS == "android" {
		// Flaky on Android, issue 48655.
		testenv.SkipFlaky(t, 48655)
	}

	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		t0 := time.Now()
		for {
			t := time.Now()
			if t.Sub(t0) >= dur {
				return
			}
		}
	})

	// Check for recursive time.now sample.
	for _, sample := range p.Sample {
		var seenNow bool
		for _, loc := range sample.Location {
			for _, line := range loc.Line {
				if line.Function.Name == "time.now" {
					if seenNow {
						t.Fatalf("unexpected recursive time.now")
					}
					seenNow = true
				}
			}
		}
	}
}

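// TestProfilerStackDepth checks that the heap, block, mutex, and goroutine
// profilers each record a full 128-frame stack produced by
// produceProfileEvents.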
func TestProfilerStackDepth(t *testing.T) {
	t.Cleanup(disableSampling())

	const depth = 128
	go produceProfileEvents(t, depth)
	awaitBlockedGoroutine(t, "chan receive", "goroutineDeep", 1)

	tests := []struct {
		profiler string
		prefix   []string
	}{
		{"heap", []string{"runtime/pprof.allocDeep"}},
		{"block", []string{"runtime.chanrecv1", "runtime/pprof.blockChanDeep"}},
		{"mutex", []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexDeep"}},
		{"goroutine", []string{"runtime.gopark", "runtime.chanrecv", "runtime.chanrecv1", "runtime/pprof.goroutineDeep"}},
	}

	for _, test := range tests {
		t.Run(test.profiler, func(t *testing.T) {
			var buf bytes.Buffer
			if err := Lookup(test.profiler).WriteTo(&buf, 0); err != nil {
				t.Fatalf("failed to write %s profile: %v", test.profiler, err)
			}
			p, err := profile.Parse(&buf)
			if err != nil {
				t.Fatalf("failed to parse %s profile: %v", test.profiler, err)
			}
			t.Logf("Profile = %v", p)

			stks := profileStacks(p)
			var matchedStacks [][]string
			for _, stk := range stks {
				if !hasPrefix(stk, test.prefix) {
					continue
				}
				// We may get multiple stacks which contain the prefix
				// we want, but only one of them should be the full
				// stack: it has the expected depth and is rooted in
				// produceProfileEvents.
				matchedStacks = append(matchedStacks, stk)
				if len(stk) != depth {
					continue
				}
				if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
					continue
				}
				// Found the full stack we were looking for.
				return
			}
			for _, stk := range matchedStacks {
				t.Logf("matched stack=%s", stk)
				if len(stk) != depth {
					t.Errorf("want stack depth = %d, got %d", depth, len(stk))
				}

				if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn {
					t.Errorf("want stack root %s, got %v", wantFn, rootFn)
				}
			}
		})
	}
}

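// hasPrefix reports whether the stack stk begins with the frames in prefix.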
func hasPrefix(stk []string, prefix []string) bool {
	return len(prefix) <= len(stk) && slices.Equal(stk[:len(prefix)], prefix)
}

// Ensure that the profile record types stay comparable (usable as map keys).
var _ = map[runtime.MemProfileRecord]struct{}{}
var _ = map[runtime.StackRecord]struct{}{}

// allocDeep recurses to the given depth and then makes a large allocation,
// producing a heap profile event with a deep stack.
func allocDeep(n int) {
	if n > 1 {
		allocDeep(n - 1)
		return
	}
	memSink = make([]byte, 1<<20)
}

// blockChanDeep produces a block profile event at the given stack depth by
// blocking on a channel receive.
func blockChanDeep(t *testing.T, n int) {
	if n > 1 {
		blockChanDeep(t, n-1)
		return
	}
	ch := make(chan struct{})
	go func() {
		awaitBlockedGoroutine(t, "chan receive", "blockChanDeep", 1)
		ch <- struct{}{}
	}()
	<-ch
}

// blockMutexDeep produces a mutex profile event at the given stack depth by
// unlocking a mutex on which another goroutine is blocked.
func blockMutexDeep(t *testing.T, n int) {
	if n > 1 {
		blockMutexDeep(t, n-1)
		return
	}
	var mu sync.Mutex
	go func() {
		mu.Lock()
		mu.Lock()
	}()
	awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutexDeep", 1)
	mu.Unlock()
}

// goroutineDeep recurses to the given stack depth and then blocks until the
// test finishes, so the goroutine profile can observe its deep stack.
func goroutineDeep(t *testing.T, n int) {
	if n > 1 {
		goroutineDeep(t, n-1)
		return
	}
	wait := make(chan struct{}, 1)
	t.Cleanup(func() {
		wait <- struct{}{}
	})
	<-wait
}

// produceProfileEvents produces pprof events at the given stack depth and
// then blocks in goroutineDeep until the test has finished. The recursion
// counts are offset so that, together with produceProfileEvents itself and
// the runtime frames on top, each resulting stack is exactly depth frames.
func produceProfileEvents(t *testing.T, depth int) {
	allocDeep(depth - 1)       // +1 for produceProfileEvents
	blockChanDeep(t, depth-2)  // +2 for produceProfileEvents, runtime.chanrecv1
	blockMutexDeep(t, depth-2) // +2 for produceProfileEvents, sync.(*Mutex).Unlock
	memSink = nil
	runtime.GC() // the heap profile is only updated at GC
	goroutineDeep(t, depth-4) // +4 for produceProfileEvents, gopark, chanrecv, chanrecv1
}

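// getProfileStacks collects a block or mutex profile via the given collect
// function and renders each record's stack as a newline-separated string,
// optionally including program counters and file:line information.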
func getProfileStacks(collect func([]runtime.BlockProfileRecord) (int, bool), fileLine bool, pcs bool) []string {
	var n int
	var ok bool
	var p []runtime.BlockProfileRecord
	for {
		p = make([]runtime.BlockProfileRecord, n)
		n, ok = collect(p)
		if ok {
			p = p[:n]
			break
		}
	}
	var stacks []string
	for _, r := range p {
		var stack strings.Builder
		for i, pc := range r.Stack() {
			if i > 0 {
				stack.WriteByte('\n')
			}
			if pcs {
				fmt.Fprintf(&stack, "%x ", pc)
			}
			// Use FuncForPC instead of CallersFrames because we want
			// the symbol information for exactly the PCs the profile
			// returned, without inlining adjustments. Subtract 1
			// because the recorded PC is the return address, one
			// instruction past the call.
			f := runtime.FuncForPC(pc - 1)
			stack.WriteString(f.Name())
			if fileLine {
				stack.WriteByte(' ')
				file, line := f.FileLine(pc - 1)
				stack.WriteString(file)
				stack.WriteByte(':')
				stack.WriteString(strconv.Itoa(line))
			}
		}
		stacks = append(stacks, stack.String())
	}
	return stacks
}

func TestMutexBlockFullAggregation(t *testing.T) {
	// Regression test: repeated contention on the same mutex from the
	// same call stack must be aggregated into a single record per unique
	// stack in the mutex and block profiles, rather than producing
	// duplicate entries.
	var m sync.Mutex

	prev := runtime.SetMutexProfileFraction(-1)
	defer runtime.SetMutexProfileFraction(prev)

	const fraction = 1
	const iters = 100
	const workers = 2

	runtime.SetMutexProfileFraction(fraction)
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)

	wg := sync.WaitGroup{}
	wg.Add(workers)
	for range workers {
		go func() {
			for range iters {
				m.Lock()
				// Wait at least 1 millisecond to pass the
				// starvation threshold for the mutex.
				time.Sleep(time.Millisecond)
				m.Unlock()
			}
			wg.Done()
		}()
	}
	wg.Wait()

	assertNoDuplicates := func(name string, collect func([]runtime.BlockProfileRecord) (int, bool)) {
		stacks := getProfileStacks(collect, true, true)
		seen := make(map[string]struct{})
		for _, s := range stacks {
			if _, ok := seen[s]; ok {
				t.Errorf("saw duplicate entry in %s profile with stack:\n%s", name, s)
			}
			seen[s] = struct{}{}
		}
		if len(seen) == 0 {
			t.Errorf("did not see any samples in %s profile for this test", name)
		}
	}
	t.Run("mutex", func(t *testing.T) {
		assertNoDuplicates("mutex", runtime.MutexProfile)
	})
	t.Run("block", func(t *testing.T) {
		assertNoDuplicates("block", runtime.BlockProfile)
	})
}

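// inlineA..inlineC and inlineD..inlineF form two call chains simple enough
// for the compiler to inline into a single frame each; the profiles must
// expand the inlined calls back into distinct frames.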
func inlineA(mu *sync.Mutex, wg *sync.WaitGroup) { inlineB(mu, wg) }
func inlineB(mu *sync.Mutex, wg *sync.WaitGroup) { inlineC(mu, wg) }
func inlineC(mu *sync.Mutex, wg *sync.WaitGroup) {
	defer wg.Done()
	mu.Lock()
	mu.Unlock()
}

func inlineD(mu *sync.Mutex, wg *sync.WaitGroup) { inlineE(mu, wg) }
func inlineE(mu *sync.Mutex, wg *sync.WaitGroup) { inlineF(mu, wg) }
func inlineF(mu *sync.Mutex, wg *sync.WaitGroup) {
	defer wg.Done()
	mu.Unlock()
}

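// TestBlockMutexProfileInlineExpansion checks that the inlined call chains
// above appear as separate frames in the block and mutex profiles.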
func TestBlockMutexProfileInlineExpansion(t *testing.T) {
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	prev := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(prev)

	var mu sync.Mutex
	var wg sync.WaitGroup
	wg.Add(2)
	mu.Lock()
	go inlineA(&mu, &wg)
	awaitBlockedGoroutine(t, "sync.Mutex.Lock", "inlineC", 1)
	// inlineD's chain unlocks mu, which unblocks inlineC's Lock and
	// produces both the mutex and the block profile events.
	go inlineD(&mu, &wg)
	wg.Wait()

	tcs := []struct {
		Name     string
		Collect  func([]runtime.BlockProfileRecord) (int, bool)
		SubStack string
	}{
		{
			Name:    "mutex",
			Collect: runtime.MutexProfile,
			SubStack: `sync.(*Mutex).Unlock
runtime/pprof.inlineF
runtime/pprof.inlineE
runtime/pprof.inlineD`,
		},
		{
			Name:    "block",
			Collect: runtime.BlockProfile,
			SubStack: `sync.(*Mutex).Lock
runtime/pprof.inlineC
runtime/pprof.inlineB
runtime/pprof.inlineA`,
		},
	}

	for _, tc := range tcs {
		t.Run(tc.Name, func(t *testing.T) {
			stacks := getProfileStacks(tc.Collect, false, false)
			for _, s := range stacks {
				if strings.Contains(s, tc.SubStack) {
					return
				}
			}
			t.Error("did not see expected stack")
			t.Logf("wanted:\n%s", tc.SubStack)
			t.Logf("got: %s", stacks)
		})
	}
}

func TestProfileRecordNullPadding(t *testing.T) {
	// Produce events for the different profile types.
	t.Cleanup(disableSampling())
	memSink = make([]byte, 1)      // MemProfile
	<-time.After(time.Millisecond) // BlockProfile
	blockMutex(t)                  // MutexProfile
	runtime.GC()

	// Test that all profile records are null padded.
	testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
	testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
	testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
	testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
		return runtime.MemProfile(p, true)
	})
	testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
		return runtime.MemProfile(p, false)
	})
}

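// testProfileRecordNullPadding collects profile records of type T and checks
// that the unused tail of each record's Stack0 array is zeroed rather than
// left holding stale values.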
func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
	stack0 := func(sr *T) *[32]uintptr {
		switch t := any(sr).(type) {
		case *runtime.StackRecord:
			return &t.Stack0
		case *runtime.MemProfileRecord:
			return &t.Stack0
		case *runtime.BlockProfileRecord:
			return &t.Stack0
		default:
			panic(fmt.Sprintf("unexpected type %T", sr))
		}
	}

	t.Run(name, func(t *testing.T) {
		var p []T
		for {
			n, ok := fn(p)
			if ok {
				p = p[:n]
				break
			}
			p = make([]T, n*2)
			for i := range p {
				s0 := stack0(&p[i])
				for j := range s0 {
					// Poison the Stack0 array to identify lack of
					// null padding.
					s0[j] = ^uintptr(0)
				}
			}
		}

		if len(p) == 0 {
			t.Fatal("no records found")
		}

		for _, sr := range p {
			for i, v := range stack0(&sr) {
				if v == ^uintptr(0) {
					t.Fatalf("record's Stack0[%d] is not null padded: %+v", i, sr)
				}
			}
		}
	})
}

// disableSampling configures the profilers to capture all events, otherwise
// it's difficult to assert anything. It returns a function that restores the
// previous settings.
func disableSampling() func() {
	oldMemRate := runtime.MemProfileRate
	runtime.MemProfileRate = 1
	runtime.SetBlockProfileRate(1)
	oldMutexRate := runtime.SetMutexProfileFraction(1)
	return func() {
		runtime.MemProfileRate = oldMemRate
		runtime.SetBlockProfileRate(0)
		runtime.SetMutexProfileFraction(oldMutexRate)
	}
}