]> git.ipfire.org Git - thirdparty/gcc.git/blob - libgo/go/runtime/pprof/pprof_test.go
libgo: update to Go1.14beta1
[thirdparty/gcc.git] / libgo / go / runtime / pprof / pprof_test.go
1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // +build !js
6
7 package pprof
8
9 import (
10 "bytes"
11 "context"
12 "fmt"
13 "internal/testenv"
14 "io"
15 "io/ioutil"
16 "math/big"
17 "os"
18 "os/exec"
19 "regexp"
20 "runtime"
21 "runtime/pprof/internal/profile"
22 "strings"
23 "sync"
24 "sync/atomic"
25 "testing"
26 "time"
27 )
28
// cpuHogger repeatedly applies f to an accumulator seeded from *y until at
// least dur has elapsed, then writes the result back through y.
// We only need a single 100 Hz clock tick, so dur is a generous buffer;
// the floor of 500 iterations (roughly 100ms of work) keeps
// TestCPUProfileMultithreaded from failing when only one thread happens to
// be scheduled during the testing period.
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	start := time.Now()
	acc := *y
	iters := 0
	for iters < 500 || time.Since(start) < dur {
		acc = f(acc)
		iters++
	}
	*y = acc
}
42
// Accumulator sinks for the CPU hog functions. Writing the hog results
// back through these package-level variables keeps the compiler from
// optimizing the busy loops away.
var (
	salt1 = 0
	salt2 = 0
)
47
48 // The actual CPU hogging function.
49 // Must not call other functions nor access heap/globals in the loop,
50 // otherwise under race detector the samples will be in the race runtime.
51 func cpuHog1(x int) int {
52 return cpuHog0(x, 1e5)
53 }
54
// cpuHog0 burns CPU for n iterations of a trivial arithmetic loop over x
// and returns the final value. It deliberately touches only locals so
// that profile samples attribute to this function alone.
func cpuHog0(x, n int) int {
	v := x
	for i := n; i > 0; i-- {
		switch {
		case v > 0:
			v *= v
		default:
			v *= v + 1
		}
	}
	return v
}
66
// cpuHog2 is a second CPU hog, distinguishable from cpuHog1 in profiles.
// It performs 1e5 iterations of local arithmetic on x and returns the result.
func cpuHog2(x int) int {
	v := x
	i := 0
	for i < 100000 {
		if v <= 0 {
			v *= v + 2
		} else {
			v *= v
		}
		i++
	}
	return v
}
78
79 // Return a list of functions that we don't want to ever appear in CPU
80 // profiles. For gccgo, that list includes the sigprof handler itself.
81 func avoidFunctions() []string {
82 if runtime.Compiler == "gccgo" {
83 return []string{"runtime.sigprof"}
84 }
85 return nil
86 }
87
88 func TestCPUProfile(t *testing.T) {
89 testCPUProfile(t, stackContains, []string{"pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
90 cpuHogger(cpuHog1, &salt1, dur)
91 })
92 }
93
94 func TestCPUProfileMultithreaded(t *testing.T) {
95 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
96 testCPUProfile(t, stackContains, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
97 c := make(chan int)
98 go func() {
99 cpuHogger(cpuHog1, &salt1, dur)
100 c <- 1
101 }()
102 cpuHogger(cpuHog2, &salt2, dur)
103 <-c
104 })
105 }
106
107 // containsInlinedCall reports whether the function body for the function f is
108 // known to contain an inlined function call within the first maxBytes bytes.
109 func containsInlinedCall(f interface{}, maxBytes int) bool {
110 _, found := findInlinedCall(f, maxBytes)
111 return found
112 }
113
114 // findInlinedCall returns the PC of an inlined function call within
115 // the function body for the function f if any.
116 func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
117 fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
118 if fFunc == nil || fFunc.Entry() == 0 {
119 panic("failed to locate function entry")
120 }
121
122 for offset := 0; offset < maxBytes; offset++ {
123 innerPC := fFunc.Entry() + uintptr(offset)
124 inner := runtime.FuncForPC(innerPC)
125 if inner == nil {
126 // No function known for this PC value.
127 // It might simply be misaligned, so keep searching.
128 continue
129 }
130 if inner.Entry() != fFunc.Entry() {
131 // Scanned past f and didn't find any inlined functions.
132 break
133 }
134 if inner.Name() != fFunc.Name() {
135 // This PC has f as its entry-point, but is not f. Therefore, it must be a
136 // function inlined into f.
137 return uint64(innerPC), true
138 }
139 }
140
141 return 0, false
142 }
143
// TestCPUProfileInlining checks that stacks involving an inlined call
// (inlinedCallee inlined into inlinedCaller) are encoded correctly: both
// functions must share a single profile Location, with the callee's line
// appearing before the caller's.
func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	p := testCPUProfile(t, stackContains, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check if inlined function locations are encoded correctly. The
	// inlinedCallee and inlinedCaller should be in one location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			// Only count the caller when it follows the callee within the
			// same Location's Line slice.
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}
169
// inlinedCaller calls inlinedCallee, which the compiler is expected to
// inline here; TestCPUProfileInlining skips itself if it cannot confirm
// that the inlining actually happened. Do not restructure this function:
// its shape is what makes the inlining possible.
func inlinedCaller(x int) int {
	x = inlinedCallee(x, 1e5)
	return x
}
174
// inlinedCallee is a tiny wrapper around cpuHog0, kept small so the
// compiler inlines it into its callers (the inlining tests depend on that).
func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}
178
// TestCPUProfileRecursion checks that samples taken inside the recursive
// recursionCaller/recursionCallee pair are encoded correctly: any single
// profile Location may mention a recursive function at most once.
func TestCPUProfileRecursion(t *testing.T) {
	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// check the Location encoding was not confused by recursive calls.
	for i, loc := range p.Location {
		// Count how many Line entries in this Location belong to one of the
		// recursive functions; more than one would mean recursion was
		// folded into a single Location.
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}
197
// recursionCaller drives recursionCallee; used by TestCPUProfileRecursion
// to produce stacks mixing recursive and inlined frames.
func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}
202
// recursionCallee recurses n levels, burning CPU at each level via
// inlinedCallee, so profile stacks contain repeated recursive frames.
func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}
210
211 func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
212 p, err := profile.Parse(bytes.NewReader(valBytes))
213 if err != nil {
214 t.Fatal(err)
215 }
216 for _, sample := range p.Sample {
217 count := uintptr(sample.Value[0])
218 f(count, sample.Location, sample.Label)
219 }
220 return p
221 }
222
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
// Because profiling is inherently timing-dependent, the run is retried
// with doubled durations (up to maxDuration) before declaring failure,
// and failures on platforms with known-broken profiling become skips.
func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			// Log the OS version; macOS profiling reliability has
			// historically varied by release.
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	// Platforms where CPU profiling is known to be unreliable; a failure
	// there is turned into a skip below rather than a test failure.
	broken := false
	switch runtime.GOOS {
	case "darwin", "dragonfly", "netbsd", "illumos", "solaris":
		broken = true
	case "openbsd":
		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
			broken = true
		}
	}

	maxDuration := 5 * time.Second
	if testing.Short() && broken {
		// If it's expected to be broken, no point waiting around.
		maxDuration /= 10
	}

	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
			return p
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator,
	// QEMU is not perfect at emulating everything.
	// IN_QEMU environmental variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}
303
// contains reports whether slice has an element equal to s.
func contains(slice []string, s string) bool {
	for _, v := range slice {
		if v == s {
			return true
		}
	}
	return false
}
312
313 // stackContains matches if a function named spec appears anywhere in the stack trace.
314 func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
315 for _, loc := range stk {
316 for _, line := range loc.Line {
317 if strings.Contains(line.Function.Name, spec) {
318 return true
319 }
320 }
321 }
322 return false
323 }
324
325 type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
326
327 func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
328 ok = true
329
330 // Check that profile is well formed, contains 'need', and does not contain
331 // anything from 'avoid'.
332 have := make([]uintptr, len(need))
333 avoidSamples := make([]uintptr, len(avoid))
334 var samples uintptr
335 var buf bytes.Buffer
336 p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
337 fmt.Fprintf(&buf, "%d:", count)
338 fprintStack(&buf, stk)
339 samples += count
340 for i, spec := range need {
341 if matches(spec, count, stk, labels) {
342 have[i] += count
343 }
344 }
345 for i, name := range avoid {
346 for _, loc := range stk {
347 for _, line := range loc.Line {
348 if strings.Contains(line.Function.Name, name) {
349 avoidSamples[i] += count
350 }
351 }
352 }
353 }
354 for i, name := range avoid {
355 for _, loc := range stk {
356 for _, line := range loc.Line {
357 if strings.Contains(line.Function.Name, name) {
358 avoidSamples[i] += count
359 }
360 }
361 }
362 }
363 fmt.Fprintf(&buf, "\n")
364 })
365 t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
366
367 if samples < 10 && runtime.GOOS == "windows" {
368 // On some windows machines we end up with
369 // not enough samples due to coarse timer
370 // resolution. Let it go.
371 t.Log("too few samples on Windows (golang.org/issue/10842)")
372 return p, false
373 }
374
375 // Check that we got a reasonable number of samples.
376 // We used to always require at least ideal/4 samples,
377 // but that is too hard to guarantee on a loaded system.
378 // Now we accept 10 or more samples, which we take to be
379 // enough to show that at least some profiling is occurring.
380 if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
381 t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
382 ok = false
383 }
384
385 for i, name := range avoid {
386 bad := avoidSamples[i]
387 if bad != 0 {
388 t.Logf("found %d samples in avoid-function %s\n", bad, name)
389 ok = false
390 }
391 }
392
393 if len(need) == 0 {
394 return p, ok
395 }
396
397 var total uintptr
398 for i, name := range need {
399 total += have[i]
400 t.Logf("%s: %d\n", name, have[i])
401 }
402 if total == 0 {
403 t.Logf("no samples in expected functions")
404 ok = false
405 }
406 // We'd like to check a reasonable minimum, like
407 // total / len(have) / smallconstant, but this test is
408 // pretty flaky (see bug 7095). So we'll just test to
409 // make sure we got at least one sample.
410 min := uintptr(1)
411 for i, name := range need {
412 if have[i] < min {
413 t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
414 ok = false
415 }
416 }
417 return p, ok
418 }
419
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	// Fork repeatedly while the profiler is running. The command's output
	// and error are deliberately ignored: the only thing under test is
	// that the forks complete without hanging.
	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}
459
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 seconds
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		// Hammer the scheduler so that many samples land during
		// goroutine switches.
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
		// The special entry
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the inner most frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}
508
509 func fprintStack(w io.Writer, stk []*profile.Location) {
510 for _, loc := range stk {
511 fmt.Fprintf(w, " %#x", loc.Address)
512 fmt.Fprintf(w, " (")
513 for i, line := range loc.Line {
514 if i > 0 {
515 fmt.Fprintf(w, " ")
516 }
517 fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
518 }
519 fmt.Fprintf(w, ")")
520 }
521 fmt.Fprintf(w, "\n")
522 }
523
524 // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
525 func TestMathBigDivide(t *testing.T) {
526 testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
527 t := time.After(duration)
528 pi := new(big.Int)
529 for {
530 for i := 0; i < 100; i++ {
531 n := big.NewInt(2646693125139304345)
532 d := big.NewInt(842468587426513207)
533 pi.Div(n, d)
534 }
535 select {
536 case <-t:
537 return
538 default:
539 }
540 }
541 })
542 }
543
544 // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
545 func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
546 for _, f := range strings.Split(spec, ",") {
547 if !stackContains(f, count, stk, labels) {
548 return false
549 }
550 }
551 return true
552 }
553
554 func TestMorestack(t *testing.T) {
555 if runtime.Compiler == "gccgo" {
556 t.Skip("no runtime.newstack in gccgo")
557 }
558 testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
559 t := time.After(duration)
560 c := make(chan bool)
561 for {
562 go func() {
563 growstack1()
564 c <- true
565 }()
566 select {
567 case <-t:
568 return
569 case <-c:
570 }
571 }
572 })
573 }
574
// growstack1 adds a non-inlined pprof-package frame between TestMorestack's
// goroutine and growstack, so profile stacks contain a named function here.
//
//go:noinline
func growstack1() {
	growstack()
}
579
// growstack declares an 8 KiB stack frame to force the runtime to grow
// the goroutine stack; must not be inlined or the frame disappears.
//
//go:noinline
func growstack() {
	var buf [8 << 10]byte
	use(buf)
}
585
// use keeps its argument live so growstack's buffer is not optimized away.
//
//go:noinline
func use(x [8 << 10]byte) {}
588
// TestBlockProfile exercises every kind of blocking event the block
// profiler records (channel ops, select, mutex, cond), then checks both
// the debug=1 text rendering (against per-case regexps) and the proto
// rendering (against expected stack prefixes).
// Skipped under gccgo: the expected stack details differ.
func TestBlockProfile(t *testing.T) {
	t.Skip("lots of details are different for gccgo; FIXME")
	// TestCase pairs a blocking helper with the stack (for the proto check)
	// and regexp (for the debug=1 check) its profile entry must match.
	type TestCase struct {
		name string
		f    func()
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		// Each expected regexp is written with single tabs; widen them to
		// \t+ so the match tolerates the renderer's column alignment.
		for _, test := range tests {
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})

}
747
748 func stacks(p *profile.Profile) (res [][]string) {
749 for _, s := range p.Sample {
750 var stk []string
751 for _, l := range s.Location {
752 for _, line := range l.Line {
753 stk = append(stk, line.Function.Name)
754 }
755 }
756 res = append(res, stk)
757 }
758 return res
759 }
760
// containsStack reports whether any stack in got starts with want as a
// prefix. An empty want never matches.
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		n := 0
		for n < len(want) && stk[n] == want[n] {
			n++
		}
		if n == len(want) && n > 0 {
			return true
		}
	}
	return false
}
777
778 const blockDelay = 10 * time.Millisecond
779
780 func blockChanRecv() {
781 c := make(chan bool)
782 go func() {
783 time.Sleep(blockDelay)
784 c <- true
785 }()
786 <-c
787 }
788
789 func blockChanSend() {
790 c := make(chan bool)
791 go func() {
792 time.Sleep(blockDelay)
793 <-c
794 }()
795 c <- true
796 }
797
798 func blockChanClose() {
799 c := make(chan bool)
800 go func() {
801 time.Sleep(blockDelay)
802 close(c)
803 }()
804 <-c
805 }
806
807 func blockSelectRecvAsync() {
808 const numTries = 3
809 c := make(chan bool, 1)
810 c2 := make(chan bool, 1)
811 go func() {
812 for i := 0; i < numTries; i++ {
813 time.Sleep(blockDelay)
814 c <- true
815 }
816 }()
817 for i := 0; i < numTries; i++ {
818 select {
819 case <-c:
820 case <-c2:
821 }
822 }
823 }
824
825 func blockSelectSendSync() {
826 c := make(chan bool)
827 c2 := make(chan bool)
828 go func() {
829 time.Sleep(blockDelay)
830 <-c
831 }()
832 select {
833 case c <- true:
834 case c2 <- true:
835 }
836 }
837
838 func blockMutex() {
839 var mu sync.Mutex
840 mu.Lock()
841 go func() {
842 time.Sleep(blockDelay)
843 mu.Unlock()
844 }()
845 // Note: Unlock releases mu before recording the mutex event,
846 // so it's theoretically possible for this to proceed and
847 // capture the profile before the event is recorded. As long
848 // as this is blocked before the unlock happens, it's okay.
849 mu.Lock()
850 }
851
852 func blockCond() {
853 var mu sync.Mutex
854 c := sync.NewCond(&mu)
855 mu.Lock()
856 go func() {
857 time.Sleep(blockDelay)
858 mu.Lock()
859 c.Signal()
860 mu.Unlock()
861 }()
862 c.Wait()
863 mu.Unlock()
864 }
865
// TestMutexProfile generates one mutex-contention event via blockMutex
// and checks both the debug=1 text rendering (header, line count, entry
// format) and the proto rendering (expected stack present).
func TestMutexProfile(t *testing.T) {
	// Generate mutex profile

	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	blockMutex()

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		// Exactly one contention entry is expected: header + one record.
		if len(lines) != 6 {
			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		if len(lines) < 6 {
			return
		}
		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		// gccgo's symbolized frame lines differ, so only check them for gc.
		if runtime.Compiler != "gccgo" {
			r3 := "^#.*pprof.blockMutex.*$"
			if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
				t.Errorf("%q didn't match %q", lines[5], r3)
			}
		}
		t.Logf(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, want := range [][]string{
			// {"sync.(*Mutex).Unlock", "pprof.blockMutex.func1"},
			{"sync.Mutex.Unlock", "pprof.blockMutex..func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}
	})
}
932
933 func func1(c chan int) { <-c }
934 func func2(c chan int) { <-c }
935 func func3(c chan int) { <-c }
936 func func4(c chan int) { <-c }
937
// TestGoroutineCounts parks 100 goroutines at three distinct functions
// (50/40/10 split) and checks that the goroutine profile groups and
// sorts them by count, in both debug=1 and proto formats.
func TestGoroutineCounts(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("goroutine stacks not supported on gccgo")
	}

	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	// The "1 @" entry is this test's own goroutine.
	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
		t.Errorf("expected sorted goroutine counts:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	if !containsCounts(p, []int64{50, 40, 10, 1}) {
		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
			[]int64{50, 40, 10, 1}, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}
993
// containsInOrder reports whether every string in all occurs in s, in
// the given order and without overlapping occurrences.
func containsInOrder(s string, all ...string) bool {
	rest := s
	for _, sub := range all {
		idx := strings.Index(rest, sub)
		if idx < 0 {
			return false
		}
		rest = rest[idx+len(sub):]
	}
	return true
}
1004
1005 func containsCounts(prof *profile.Profile, counts []int64) bool {
1006 m := make(map[int64]int)
1007 for _, c := range counts {
1008 m[c]++
1009 }
1010 for _, s := range prof.Sample {
1011 // The count is the single value in the sample
1012 if len(s.Value) != 1 {
1013 return false
1014 }
1015 m[s.Value[0]]--
1016 }
1017 for _, n := range m {
1018 if n > 0 {
1019 return false
1020 }
1021 }
1022 return true
1023 }
1024
1025 var emptyCallStackTestRun int64
1026
// Issue 18836: a custom-profile entry whose recorded call stack comes out
// empty must still be rendered — the huge skip value (47674) passed to
// Add presumably leaves no frames, which should surface as a
// lostProfileEvent rather than a crash. (TODO confirm skip semantics.)
func TestEmptyCallStack(t *testing.T) {
	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
	emptyCallStackTestRun++

	t.Parallel()
	var buf bytes.Buffer
	p := NewProfile(name)

	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := name + " profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}
1049
1050 // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
1051 // and value and has funcname somewhere in the stack.
1052 func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
1053 semi := strings.Index(spec, ";")
1054 if semi == -1 {
1055 panic("no semicolon in key/value spec")
1056 }
1057 kv := strings.SplitN(spec[semi+1:], "=", 2)
1058 if len(kv) != 2 {
1059 panic("missing = in key/value spec")
1060 }
1061 if !contains(labels[kv[0]], kv[1]) {
1062 return false
1063 }
1064 return stackContains(spec[:semi], count, stk, labels)
1065 }
1066
1067 func TestCPUProfileLabel(t *testing.T) {
1068 testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
1069 Do(context.Background(), Labels("key", "value"), func(context.Context) {
1070 cpuHogger(cpuHog1, &salt1, dur)
1071 })
1072 })
1073 }
1074
1075 func TestLabelRace(t *testing.T) {
1076 // Test the race detector annotations for synchronization
1077 // between settings labels and consuming them from the
1078 // profile.
1079 testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
1080 start := time.Now()
1081 var wg sync.WaitGroup
1082 for time.Since(start) < dur {
1083 var salts [10]int
1084 for i := 0; i < 10; i++ {
1085 wg.Add(1)
1086 go func(j int) {
1087 Do(context.Background(), Labels("key", "value"), func(context.Context) {
1088 cpuHogger(cpuHog1, &salts[j], time.Millisecond)
1089 })
1090 wg.Done()
1091 }(i)
1092 }
1093 wg.Wait()
1094 }
1095 })
1096 }
1097
// Check that there is no deadlock when the program receives SIGPROF while in
// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
	f, err := ioutil.TempFile("", "profatomic")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	var flag uint64
	done := make(chan bool, 1)

	// Spin on an atomic load while the profiler delivers SIGPROF; a
	// deadlock here would hang the test.
	go func() {
		for atomic.LoadUint64(&flag) == 0 {
			runtime.Gosched()
		}
		done <- true
	}()
	time.Sleep(50 * time.Millisecond)
	atomic.StoreUint64(&flag, 1)
	<-done
}
1126
// TestTracebackAll repeatedly takes all-goroutine stack dumps while the
// CPU profiler is running.
// With gccgo, if a profiling signal arrives at the wrong time
// during traceback, it may crash or hang. See issue #29448.
func TestTracebackAll(t *testing.T) {
	f, err := ioutil.TempFile("", "proftraceback")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	ch := make(chan int)
	defer close(ch)

	// Park some goroutines so runtime.Stack has more stacks to walk.
	count := 10
	for i := 0; i < count; i++ {
		go func() {
			<-ch // block
		}()
	}

	N := 10000
	if testing.Short() {
		N = 500
	}
	buf := make([]byte, 10*1024)
	for i := 0; i < N; i++ {
		runtime.Stack(buf, true)
	}
}
1161
// TestTryAdd tests the cases that's hard to test with real program execution.
// For example, the current go compilers may not inline functions involved in recursion
// but that may not be true in the future compilers. This tests such cases by
// using fake call sequences and forcing the profile build utilizing
// translateCPUProfile defined in proto_test.go
func TestTryAdd(t *testing.T) {
	// +1 so the PC points safely inside the function body, not at its entry.
	inlinedCallerPtr := uint64(funcPC(inlinedCaller)) + 1
	inlinedCalleePtr, found := findInlinedCall(inlinedCaller, 4<<10)
	if !found {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}
	inlinedCalleePtr += 1 // +1 to be safely inside of the function body.

	period := int64(2000 * 1000) // 1/500*1e9 nanosec.

	testCases := []struct {
		name string
		// Each record is: length, timestamp (0 here), count, then stack PCs —
		// the input format assumed by profileBuilder.addCPUData.
		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
		wantLocs    [][]string        // ordered location entries with function names.
		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
	}{{
		name: "bug35538",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// A sample with the inlined caller/callee pair repeated twice,
			// and another with a single pair.
			7, 0, 10, inlinedCalleePtr, inlinedCallerPtr, inlinedCalleePtr, inlinedCallerPtr,
			5, 0, 20, inlinedCalleePtr, inlinedCallerPtr,
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "recursive_inlined_funcs",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// The same inlined-callee PC appearing back-to-back, as in recursion.
			5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
			4, 0, 40, inlinedCalleePtr,
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_later",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// Full caller/callee stack first, then a truncated one.
			5, 0, 50, inlinedCalleePtr, inlinedCallerPtr,
			4, 0, 60, inlinedCalleePtr,
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_first",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// Truncated stack seen before the full one.
			4, 0, 70, inlinedCalleePtr,
			5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
		},
		wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
			{"runtime/pprof.inlinedCallee"},
			{"runtime/pprof.inlinedCaller"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, err := translateCPUProfile(tc.input)
			if err != nil {
				t.Fatalf("translating profile: %v", err)
			}
			t.Logf("Profile: %v\n", p)

			// One location entry with all inlined functions.
			// Collect each location's function names in order for comparison.
			var gotLoc [][]string
			for _, loc := range p.Location {
				var names []string
				for _, line := range loc.Line {
					names = append(names, line.Function.Name)
				}
				gotLoc = append(gotLoc, names)
			}
			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
			}
			// All samples should point to one location.
			// Strip samples down to Value plus location IDs before comparing.
			var gotSamples []*profile.Sample
			for _, sample := range p.Sample {
				var locs []*profile.Location
				for _, loc := range sample.Location {
					locs = append(locs, &profile.Location{ID: loc.ID})
				}
				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
			}
			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
			}
		})
	}
}