Source file src/runtime/pprof/pprof_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !js

package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/abi"
	"internal/profile"
	"internal/syscall/unix"
	"internal/testenv"
	"io"
	"math"
	"math/big"
	"os"
	"regexp"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe"
)

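// cpuHogger runs the CPU-bound function f in a loop for at least dur,
// threading the result through *y so the compiler cannot optimize the
// work away.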
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	accum := *y
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		accum = f(accum)
	}
	*y = accum
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// It must not call other functions or access the heap/globals in the loop;
// otherwise, under the race detector, the samples will land in the race runtime.
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}

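// cpuHog0 is the busy loop shared by the hog functions: n iterations of
// branchy integer arithmetic on x.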
func cpuHog0(x, n int) int {
	foo := x
	for i := 0; i < n; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

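// cpuHog2 is a second hog function with a distinct name (and a slightly
// different loop body) so its samples can be told apart from cpuHog1's.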
func cpuHog2(x int) int {
	foo := x
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	return foo
}

// avoidFunctions returns a list of functions that must never appear in CPU
// profiles. For gccgo, that list includes the sigprof handler itself.
func avoidFunctions() []string {
	if runtime.Compiler == "gccgo" {
		return []string{"runtime.sigprof"}
	}
	return nil
}

func TestCPUProfile(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions())
	testCPUProfile(t, matches, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}

func TestCPUProfileMultithreadMagnitude(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip("issue 35057 is only confirmed on Linux")
	}

	// Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
	// created threads, breaking our CPU accounting.
	major, minor := unix.KernelVersion()
	t.Logf("Running on Linux %d.%d", major, minor)
	defer func() {
		if t.Failed() {
			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
		}
	}()

	// Disable on affected builders to avoid flakiness, but otherwise keep
	// it enabled to potentially warn users that they are on a broken
	// kernel.
	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
		have59 := major > 5 || (major == 5 && minor >= 9)
		have516 := major > 5 || (major == 5 && minor >= 16)
		if have59 && !have516 {
			testenv.SkipFlaky(t, 49065)
		}
	}

	// Run a workload in a single goroutine, then run copies of the same
	// workload in several goroutines. For both the serial and parallel cases,
	// the CPU time the process measures with its own profiler should match the
	// total CPU usage that the OS reports.
	//
	// We could also check that increases in parallelism (GOMAXPROCS) lead to a
	// linear increase in the CPU usage reported by both the OS and the
	// profiler, but without a guarantee of exclusive access to CPU resources
	// that is likely to be a flaky test.

	// Require the smaller value to be within 10%, or 40% in short mode.
	maxDiff := 0.10
	if testing.Short() {
		maxDiff = 0.40
	}

	compare := func(a, b time.Duration, maxDiff float64) error {
		if a <= 0 || b <= 0 {
			return fmt.Errorf("expected both time reports to be positive")
		}

		if a < b {
			a, b = b, a
		}

		diff := float64(a-b) / float64(a)
		if diff > maxDiff {
			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
		}

		return nil
	}

	for _, tc := range []struct {
		name    string
		workers int
	}{
		{
			name:    "serial",
			workers: 1,
		},
		{
			name:    "parallel",
			workers: runtime.GOMAXPROCS(0),
		},
	} {
		// Check that the OS's perspective matches what the Go runtime measures.
		t.Run(tc.name, func(t *testing.T) {
			t.Logf("Running with %d workers", tc.workers)

			var userTime, systemTime time.Duration
			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
			acceptProfile := func(t *testing.T, p *profile.Profile) bool {
				if !matches(t, p) {
					return false
				}

				ok := true
				for i, unit := range []string{"count", "nanoseconds"} {
					if have, want := p.SampleType[i].Unit, unit; have != want {
						t.Logf("pN SampleType[%d]; %q != %q", i, have, want)
						ok = false
					}
				}

				// cpuHog1 called below is the primary source of CPU
				// load, but there may be some background work by the
				// runtime. Since the OS rusage measurement will
				// include all work done by the process, also compare
				// against all samples in our profile.
				var value time.Duration
				for _, sample := range p.Sample {
					value += time.Duration(sample.Value[1]) * time.Nanosecond
				}

				totalTime := userTime + systemTime
				t.Logf("compare %s user + %s system = %s vs %s", userTime, systemTime, totalTime, value)
				if err := compare(totalTime, value, maxDiff); err != nil {
					t.Logf("compare got %v want nil", err)
					ok = false
				}

				return ok
			}

			testCPUProfile(t, acceptProfile, func(dur time.Duration) {
				userTime, systemTime = diffCPUTime(t, func() {
					var wg sync.WaitGroup
					var once sync.Once
					for i := 0; i < tc.workers; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							var salt = 0
							cpuHogger(cpuHog1, &salt, dur)
							once.Do(func() { salt1 = salt })
						}()
					}
					wg.Wait()
				})
			})
		})
	}
}

// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
func containsInlinedCall(f any, maxBytes int) bool {
	_, found := findInlinedCall(f, maxBytes)
	return found
}

// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f, if any.
func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
	if fFunc == nil || fFunc.Entry() == 0 {
		panic("failed to locate function entry")
	}

	for offset := 0; offset < maxBytes; offset++ {
		innerPC := fFunc.Entry() + uintptr(offset)
		inner := runtime.FuncForPC(innerPC)
		if inner == nil {
			// No function known for this PC value.
			// It might simply be misaligned, so keep searching.
			continue
		}
		if inner.Entry() != fFunc.Entry() {
			// Scanned past f and didn't find any inlined functions.
			break
		}
		if inner.Name() != fFunc.Name() {
			// This PC has f as its entry-point, but is not f. Therefore, it must be a
			// function inlined into f.
			return uint64(innerPC), true
		}
	}

	return 0, false
}

func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check that inlined function locations are encoded correctly:
	// inlinedCallee and inlinedCaller should appear in one Location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}

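// inlinedCaller and inlinedCallee are arranged so that inlinedCallee is
// inlined into inlinedCaller, giving the profiler an inlined frame to record.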
func inlinedCaller(x int) int {
	x = inlinedCallee(x, 1e5)
	return x
}

func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}

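// dumpCallers records the current call stack into pcs, skipping the
// runtime.Callers and dumpCallers frames themselves.
//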
//go:noinline
func dumpCallers(pcs []uintptr) {
	if pcs == nil {
		return
	}

	skip := 2 // Callers and dumpCallers
	runtime.Callers(skip, pcs)
}

//go:noinline
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}

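// inlinedCalleeDump must remain inlinable so that inlinedCallerDump records
// an inlined frame; it deliberately has no //go:noinline directive.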
func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}

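// inlineWrapperInterface and inlineWrapper exercise stack dumps taken through
// a compiler-generated method wrapper for an interface call.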
type inlineWrapperInterface interface {
	dump(stack []uintptr)
}

type inlineWrapper struct {
}

func (h inlineWrapper) dump(pcs []uintptr) {
	dumpCallers(pcs)
}

func inlinedWrapperCallerDump(pcs []uintptr) {
	var h inlineWrapperInterface
	h = &inlineWrapper{}
	h.dump(pcs)
}

func TestCPUProfileRecursion(t *testing.T) {
	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
	p := testCPUProfile(t, matches, func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// Check that the Location encoding was not confused by recursive calls.
	for i, loc := range p.Location {
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}

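// recursionCaller drives recursionCallee, which recurses while also calling
// the inlined inlinedCallee, mixing recursion and inlining in one stack.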
func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}

func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}

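// recursionChainTop, recursionChainMiddle, and recursionChainBottom form a
// mutually recursive chain that dumps its callers from the innermost call.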
func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}

func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}

func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called on every iteration; we only care about the last one.
	// We can't make this conditional or this function won't be inlined.
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}

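// parseProfile parses the encoded profile in valBytes, invokes f once per
// sample with its count, stack, and labels, and returns the parsed profile.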
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
	return p
}

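// cpuProfilingBroken reports whether CPU profiling is known to be unreliable
// or unimplemented on the current GOOS/GOARCH combination.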
func cpuProfilingBroken() bool {
	switch runtime.GOOS {
	case "plan9":
		// Profiling unimplemented.
		return true
	case "aix":
		// See https://golang.org/issue/45170.
		return true
	case "ios", "dragonfly", "netbsd", "illumos", "solaris":
		// See https://golang.org/issue/13841.
		return true
	case "openbsd":
		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
			// See https://golang.org/issue/13841.
			return true
		}
	}

	return false
}

// testCPUProfile runs f under the CPU profiler, checks the resulting profile
// with matches, and returns the parsed profile.
func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		out, err := testenv.Command(t, "uname", "-a").CombinedOutput()
		if err != nil {
			t.Fatal(err)
		}
		vers := string(out)
		t.Logf("uname -a: %v", vers)
	case "plan9":
		t.Skip("skipping on plan9")
	case "wasip1":
		t.Skip("skipping on wasip1")
	}

	broken := cpuProfilingBroken()

	deadline, ok := t.Deadline()
	if broken || !ok {
		if broken && testing.Short() {
			// If it's expected to be broken, no point waiting around.
			deadline = time.Now().Add(1 * time.Second)
		} else {
			deadline = time.Now().Add(10 * time.Second)
		}
	}

	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, prof, duration); ok {
			return p
		}

		duration *= 2
		if time.Until(deadline) < duration {
			break
		}
		t.Logf("retrying with %s duration", duration)
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator;
	// QEMU is not perfect at emulating everything.
	// The IN_QEMU environment variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}

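// diffCPUTimeImpl is an OS-specific hook for measuring the user and system
// CPU time consumed by a function call; diffCPUTime fails the test when no
// implementation is available for the current platform.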
var diffCPUTimeImpl func(f func()) (user, system time.Duration)

func diffCPUTime(t *testing.T, f func()) (user, system time.Duration) {
	if fn := diffCPUTimeImpl; fn != nil {
		return fn(f)
	}
	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
	return 0, 0
}

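// contains reports whether slice contains the exact string s.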
func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}

// stackContains matches if a function named spec appears anywhere in the stack trace.
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, loc := range stk {
		for _, line := range loc.Line {
			if strings.Contains(line.Function.Name, spec) {
				return true
			}
		}
	}
	return false
}

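// A sampleMatchFunc reports whether a single profile sample matches spec.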
type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool

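// profileOk parses prof, logs its samples, and reports whether the profile
// has a plausible number of samples for duration and satisfies matches.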
func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	var samples uintptr
	var buf strings.Builder
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		fmt.Fprintf(&buf, " labels: %v\n", labels)
		samples += count
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if matches != nil && !matches(t, p) {
		ok = false
	}

	return p, ok
}

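// A profileMatchFunc reports whether an entire parsed profile is acceptable.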
type profileMatchFunc func(*testing.T, *profile.Profile) bool

func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
	return func(t *testing.T, p *profile.Profile) (ok bool) {
		ok = true

		// Check that profile is well formed, contains 'need', and does not contain
		// anything from 'avoid'.
		have := make([]uintptr, len(need))
		avoidSamples := make([]uintptr, len(avoid))

		for _, sample := range p.Sample {
			count := uintptr(sample.Value[0])
			for i, spec := range need {
				if matches(spec, count, sample.Location, sample.Label) {
					have[i] += count
				}
			}
			for i, name := range avoid {
				for _, loc := range sample.Location {
					for _, line := range loc.Line {
						if strings.Contains(line.Function.Name, name) {
							avoidSamples[i] += count
						}
					}
				}
			}
		}

		for i, name := range avoid {
			bad := avoidSamples[i]
			if bad != 0 {
				t.Logf("found %d samples in avoid-function %s\n", bad, name)
				ok = false
			}
		}

		if len(need) == 0 {
			return
		}

		var total uintptr
		for i, name := range need {
			total += have[i]
			t.Logf("found %d samples in expected function %s\n", have[i], name)
		}
		if total == 0 {
			t.Logf("no samples in expected functions")
			ok = false
		}

		// We'd like to check a reasonable minimum, like
		// total / len(have) / smallconstant, but this test is
		// pretty flaky (see bug 7095). So we'll just test to
		// make sure we got at least one sample.
		min := uintptr(1)
		for i, name := range need {
			if have[i] < min {
				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
				ok = false
			}
		}
		return
	}
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	exe, err := os.Executable()
	if err != nil {
		t.Fatal(err)
	}

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice; otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		testenv.Command(t, exe, "-h").CombinedOutput()
	}
}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for gogo with an attempt at a traceback.
		// "runtime.gogo" is OK, because that's the part of the context switch
		// before the actual switch begins. But we should not see "gogo",
		// aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// An entry with just one frame is OK too:
			// it knew to stop at gogo.
			if len(stk) == 1 {
				return
			}

			// Otherwise, we should not see gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "gogo" {
				var buf strings.Builder
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
			}
		})
	}
}

func fprintStack(w io.Writer, stk []*profile.Location) {
	if len(stk) == 0 {
		fmt.Fprintf(w, " (stack empty)")
	}
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

// stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, f := range strings.Split(spec, ",") {
		if !stackContains(f, count, stk, labels) {
			return false
		}
	}
	return true
}

func TestMorestack(t *testing.T) {
	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
	testCPUProfile(t, matches, func(duration time.Duration) {
		t := time.After(duration)
		c := make(chan bool)
		for {
			go func() {
				growstack1()
				c <- true
			}()
			select {
			case <-t:
				return
			case <-c:
			}
		}
	})
}

//go:noinline
func growstack1() {
	growstack(10)
}

//go:noinline
func growstack(n int) {
	var buf [8 << 18]byte
	use(buf)
	if n > 0 {
		growstack(n - 1)
	}
}

//go:noinline
func use(x [8 << 18]byte) {}

func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func(*testing.T)
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f(t)
	}

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})
}

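// stacks flattens each sample in p into a list of function names, ordered
// from innermost to outermost frame.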
func stacks(p *profile.Profile) (res [][]string) {
	for _, s := range p.Sample {
		var stk []string
		for _, l := range s.Location {
			for _, line := range l.Line {
				stk = append(stk, line.Function.Name)
			}
		}
		res = append(res, stk)
	}
	return res
}

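// containsStack reports whether any stack in got has want as a prefix,
// innermost frame first.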
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		for i, f := range want {
			if f != stk[i] {
				break
			}
			if i == len(want)-1 {
				return true
			}
		}
	}
	return false
}

// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
// shows a goroutine in the given state with a stack frame in
// runtime/pprof.<fName>.
func awaitBlockedGoroutine(t *testing.T, state, fName string, count int) {
	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
	r := regexp.MustCompile(re)

	if deadline, ok := t.Deadline(); ok {
		if d := time.Until(deadline); d > 1*time.Second {
			timer := time.AfterFunc(d-1*time.Second, func() {
				debug.SetTraceback("all")
				panic(fmt.Sprintf("timed out waiting for %#q", re))
			})
			defer timer.Stop()
		}
	}

	buf := make([]byte, 64<<10)
	for {
		runtime.Gosched()
		n := runtime.Stack(buf, true)
		if n == len(buf) {
			// Buffer wasn't large enough for a full goroutine dump.
			// Resize it and try again.
			buf = make([]byte, 2*len(buf))
			continue
		}
		if len(r.FindAll(buf[:n], -1)) >= count {
			return
		}
	}
}

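// Each of the block* helpers below provokes one kind of blocking event
// (channel receive, send, close, select, mutex, cond) so that it appears
// in the block profile.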
func blockChanRecv(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan receive", "blockChanRecv", 1)
		c <- true
	}()
	<-c
}

func blockChanSend(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan send", "blockChanSend", 1)
		<-c
	}()
	c <- true
}

func blockChanClose(t *testing.T) {
	c := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "chan receive", "blockChanClose", 1)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync(t *testing.T) {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync", 1)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync(t *testing.T) {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		awaitBlockedGoroutine(t, "select", "blockSelectSendSync", 1)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex(t *testing.T) {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", 1)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

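// blockMutexN is like blockMutex, but it blocks n goroutines on the mutex
// and holds it for an extra duration d before unlocking.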
func blockMutexN(t *testing.T, n int, d time.Duration) {
	var wg sync.WaitGroup
	var mu sync.Mutex
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Mutex.Lock", "blockMutex", n)
		time.Sleep(d)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			mu.Unlock()
		}()
	}
	wg.Wait()
}

func blockCond(t *testing.T) {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond", 1)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}

// See http://golang.org/cl/299991.
func TestBlockProfileBias(t *testing.T) {
	rate := int(1000) // arbitrary value
	runtime.SetBlockProfileRate(rate)
	defer runtime.SetBlockProfileRate(0)

	// simulate blocking events
	blockFrequentShort(rate)
	blockInfrequentLong(rate)

	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Fatalf("failed to parse profile: %v", err)
	}
	t.Logf("parsed proto: %s", p)

	il := float64(-1) // blockInfrequentLong duration
	fs := float64(-1) // blockFrequentShort duration
	for _, s := range p.Sample {
		for _, l := range s.Location {
			for _, line := range l.Line {
				if len(s.Value) < 2 {
					t.Fatal("block profile has less than 2 sample types")
				}

				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
					il = float64(s.Value[1])
				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
					fs = float64(s.Value[1])
				}
			}
		}
	}
	if il == -1 || fs == -1 {
		t.Fatal("block profile is missing expected functions")
	}

	// stddev of bias from 100 runs on local machine multiplied by 10x
	const threshold = 0.2
	if bias := (il - fs) / il; math.Abs(bias) > threshold {
		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
	} else {
		t.Logf("bias: abs(%f) < %f", bias, threshold)
	}
}

// blockFrequentShort produces 100000 block events with an average duration of
// rate / 10.
func blockFrequentShort(rate int) {
	for i := 0; i < 100000; i++ {
		blockevent(int64(rate/10), 1)
	}
}

// blockInfrequentLong produces 10000 block events with an average duration of
// rate.
func blockInfrequentLong(rate int) {
	for i := 0; i < 10000; i++ {
		blockevent(int64(rate), 1)
	}
}

// Used by TestBlockProfileBias.
//
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)

func TestMutexProfile(t *testing.T) {
	// Generate mutex profile

	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	const (
		N = 100
		D = 100 * time.Millisecond
	)
	start := time.Now()
	blockMutexN(t, N, D)
	blockMutexNTime := time.Since(start)

	t.Run("debug=1", func(t *testing.T) {
		var w strings.Builder
		Lookup("mutex").WriteTo(&w, 1)
		prof := w.String()
		t.Logf("received profile: %v", prof)

		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
			t.Errorf("Bad profile header:\n%v", prof)
		}
		prof = strings.Trim(prof, "\n")
		lines := strings.Split(prof, "\n")
		if len(lines) < 6 {
			t.Fatalf("expected >=6 lines, got %d %q\n%s", len(lines), prof, prof)
		}
		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[3], r2)
		}
		r3 := "^#.*runtime/pprof.blockMutex.*$"
		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
			t.Errorf("%q didn't match %q", lines[5], r3)
		}
		t.Log(prof)
	})
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, want := range [][]string{
			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
		} {
			if !containsStack(stks, want) {
				t.Errorf("No matching stack entry for %+v", want)
			}
		}

		i := 0
		for ; i < len(p.SampleType); i++ {
			if p.SampleType[i].Unit == "nanoseconds" {
				break
			}
		}
		if i >= len(p.SampleType) {
			t.Fatalf("profile did not contain nanoseconds sample")
		}
		total := int64(0)
		for _, s := range p.Sample {
			total += s.Value[i]
		}
		// Want d to be at least N*D, but give some wiggle-room to avoid
		// a test flaking. Set an upper-bound proportional to the total
		// wall time spent in blockMutexN. Generally speaking, the total
		// contention time could be arbitrarily high when considering
		// OS scheduler delays, or any other delays from the environment:
		// time keeps ticking during these delays. By making the upper
		// bound proportional to the wall time in blockMutexN, in theory
		// we're accounting for all these possible delays.
		d := time.Duration(total)
		lo := time.Duration(N * D * 9 / 10)
		hi := time.Duration(N) * blockMutexNTime * 11 / 10
		if d < lo || d > hi {
			for _, s := range p.Sample {
				t.Logf("sample: %s", time.Duration(s.Value[i]))
			}
			t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
		}
	})
}

func TestMutexProfileRateAdjust(t *testing.T) {
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileRate 0, got %d", old)
	}

	readProfile := func() (contentions int64, delay int64) {
		var w bytes.Buffer
		Lookup("mutex").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		for _, s := range p.Sample {
			for _, l := range s.Location {
				for _, line := range l.Line {
					if line.Function.Name == "runtime/pprof.blockMutex.func1" {
						contentions += s.Value[0]
						delay += s.Value[1]
					}
				}
			}
		}
		return
	}

	blockMutex(t)
	contentions, delay := readProfile()
	if contentions == 0 || delay == 0 {
		t.Fatal("did not see expected function in profile")
	}
	runtime.SetMutexProfileFraction(0)
	newContentions, newDelay := readProfile()
	if newContentions != contentions || newDelay != delay {
		t.Fatalf("sample value changed: got [%d, %d], want [%d, %d]", newContentions, newDelay, contentions, delay)
	}
}

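// func1 through func4 are distinct functions that block on a channel
// receive, giving TestGoroutineCounts goroutines with distinguishable stacks.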
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}
	ctx := context.Background()

	// ... and again, with labels this time (just with fewer iterations to keep
	// sorting deterministic).
	Do(ctx, Labels("label", "value"), func(context.Context) {
		for i := 0; i < 89; i++ {
			switch {
			case i%10 == 0:
				go func1(c)
			case i%2 == 0:
				go func2(c)
			default:
				go func3(c)
			}
			// Let goroutines block on channel
			for j := 0; j < 5; j++ {
				runtime.Gosched()
			}
		}
	})

	SetGoroutineLabels(WithLabels(context.Background(), Labels("self-label", "self-value")))
	defer SetGoroutineLabels(context.Background())

	garbage := new(*int)
	fingReady := make(chan struct{})
	runtime.SetFinalizer(garbage, func(v **int) {
		Do(context.Background(), Labels("fing-label", "fing-value"), func(ctx context.Context) {
			close(fingReady)
			<-c
		})
	})
	garbage = nil
	for i := 0; i < 2; i++ {
		runtime.GC()
	}
	<-fingReady

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	labels := labelMap{"label": "value"}
	labelStr := "\n# labels: " + labels.String()
	selfLabel := labelMap{"self-label": "self-value"}
	selfLabelStr := "\n# labels: " + selfLabel.String()
	fingLabel := labelMap{"fing-label": "fing-value"}
	fingLabelStr := "\n# labels: " + fingLabel.String()
	orderedPrefix := []string{
		"\n50 @ ",
		"\n44 @", labelStr,
		"\n40 @",
		"\n36 @", labelStr,
		"\n10 @",
		"\n9 @", labelStr,
		"\n1 @"}
	if !containsInOrder(prof, append(orderedPrefix, selfLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}
	if !containsInOrder(prof, append(orderedPrefix, fingLabelStr)...) {
		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	expectedLabels := map[int64]map[string]string{
		50: {},
		44: {"label": "value"},
		40: {},
		36: {"label": "value"},
		10: {},
		9:  {"label": "value"},
		1:  {"self-label": "self-value", "fing-label": "fing-value"},
	}
	if !containsCountsLabels(p, expectedLabels) {
		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
			expectedLabels, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}

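// containsInOrder reports whether all of the strings in all appear in s in
// the given order, possibly with other text in between. For example:
//
//	containsInOrder("a b c", "a", "c") // true
//	containsInOrder("a b c", "c", "a") // false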
func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		var ok bool
		if _, s, ok = strings.Cut(s, t); !ok {
			return false
		}
	}
	return true
}

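// containsCountsLabels reports whether prof contains samples with the
// goroutine counts and label sets described by countLabels.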
  1506  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
  1507  	m := make(map[int64]int)
  1508  	type nkey struct {
  1509  		count    int64
  1510  		key, val string
  1511  	}
  1512  	n := make(map[nkey]int)
  1513  	for c, kv := range countLabels {
  1514  		m[c]++
  1515  		for k, v := range kv {
  1516  			n[nkey{
  1517  				count: c,
  1518  				key:   k,
  1519  				val:   v,
  1520  			}]++
  1521  
  1522  		}
  1523  	}
  1524  	for _, s := range prof.Sample {
  1525  		// The count is the single value in the sample
  1526  		if len(s.Value) != 1 {
  1527  			return false
  1528  		}
  1529  		m[s.Value[0]]--
  1530  		for k, vs := range s.Label {
  1531  			for _, v := range vs {
  1532  				n[nkey{
  1533  					count: s.Value[0],
  1534  					key:   k,
  1535  					val:   v,
  1536  				}]--
  1537  			}
  1538  		}
  1539  	}
  1540  	for _, n := range m {
  1541  		if n > 0 {
  1542  			return false
  1543  		}
  1544  	}
  1545  	for _, ncnt := range n {
  1546  		if ncnt != 0 {
  1547  			return false
  1548  		}
  1549  	}
  1550  	return true
  1551  }
  1552  
  1553  func TestGoroutineProfileConcurrency(t *testing.T) {
  1554  	testenv.MustHaveParallelism(t)
  1555  
  1556  	goroutineProf := Lookup("goroutine")
  1557  
  1558  	profilerCalls := func(s string) int {
  1559  		return strings.Count(s, "\truntime/pprof.runtime_goroutineProfileWithLabels+")
  1560  	}
  1561  
  1562  	includesFinalizer := func(s string) bool {
  1563  		return strings.Contains(s, "runtime.runfinq")
  1564  	}
  1565  
  1566  	// Concurrent calls to the goroutine profiler should not trigger data races
  1567  	// or corruption.
  1568  	t.Run("overlapping profile requests", func(t *testing.T) {
  1569  		ctx := context.Background()
  1570  		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
  1571  		defer cancel()
  1572  
  1573  		var wg sync.WaitGroup
  1574  		for i := 0; i < 2; i++ {
  1575  			wg.Add(1)
  1576  			Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) {
  1577  				go func() {
  1578  					defer wg.Done()
  1579  					for ctx.Err() == nil {
  1580  						var w strings.Builder
  1581  						goroutineProf.WriteTo(&w, 1)
  1582  						prof := w.String()
  1583  						count := profilerCalls(prof)
  1584  						if count >= 2 {
  1585  							t.Logf("prof %d\n%s", count, prof)
  1586  							cancel()
  1587  						}
  1588  					}
  1589  				}()
  1590  			})
  1591  		}
  1592  		wg.Wait()
  1593  	})
  1594  
  1595  	// The finalizer goroutine should not show up in most profiles, since it's
  1596  	// marked as a system goroutine when idle.
  1597  	t.Run("finalizer not present", func(t *testing.T) {
  1598  		var w strings.Builder
  1599  		goroutineProf.WriteTo(&w, 1)
  1600  		prof := w.String()
  1601  		if includesFinalizer(prof) {
  1602  			t.Errorf("profile includes finalizer (but finalizer should be marked as system):\n%s", prof)
  1603  		}
  1604  	})
  1605  
  1606  	// The finalizer goroutine should show up when it's running user code.
  1607  	t.Run("finalizer present", func(t *testing.T) {
  1608  		// T is a pointer type so it won't be allocated by the tiny
  1609  		// allocator, which can lead to its finalizer not being called
  1610  		// during this test
  1611  		type T *byte
  1612  		obj := new(T)
  1613  		ch1, ch2 := make(chan int), make(chan int)
  1614  		defer close(ch2)
  1615  		runtime.SetFinalizer(obj, func(_ interface{}) {
  1616  			close(ch1)
  1617  			<-ch2
  1618  		})
  1619  		obj = nil
  1620  		for i := 10; i >= 0; i-- {
  1621  			select {
  1622  			case <-ch1:
  1623  			default:
  1624  				if i == 0 {
  1625  					t.Fatalf("finalizer did not run")
  1626  				}
  1627  				runtime.GC()
  1628  			}
  1629  		}
  1630  		var w strings.Builder
  1631  		goroutineProf.WriteTo(&w, 1)
  1632  		prof := w.String()
  1633  		if !includesFinalizer(prof) {
  1634  			t.Errorf("profile does not include finalizer (and it should be marked as user):\n%s", prof)
  1635  		}
  1636  	})
  1637  
  1638  	// Check that new goroutines only show up in order.
  1639  	testLaunches := func(t *testing.T) {
  1640  		var done sync.WaitGroup
  1641  		defer done.Wait()
  1642  
  1643  		ctx := context.Background()
  1644  		ctx, cancel := context.WithCancel(ctx)
  1645  		defer cancel()
  1646  
  1647  		ch := make(chan int)
  1648  		defer close(ch)
  1649  
  1650  		var ready sync.WaitGroup
  1651  
  1652  		// These goroutines all survive until the end of the subtest, so we can
  1653  		// check that a (numbered) goroutine appearing in the profile implies
  1654  		// that all older goroutines also appear in the profile.
  1655  		ready.Add(1)
  1656  		done.Add(1)
  1657  		go func() {
  1658  			defer done.Done()
  1659  			for i := 0; ctx.Err() == nil; i++ {
  1660  				// Use SetGoroutineLabels rather than Do we can always expect an
  1661  				// extra goroutine (this one) with most recent label.
  1662  				SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-loop-i", fmt.Sprint(i))))
  1663  				done.Add(1)
  1664  				go func() {
  1665  					<-ch
  1666  					done.Done()
  1667  				}()
  1668  				for j := 0; j < i; j++ {
  1669  					// Spin for longer and longer as the test goes on. This
  1670  					// goroutine will do O(N^2) work with the number of
  1671  					// goroutines it launches. This should be slow relative to
  1672  					// the work involved in collecting a goroutine profile,
  1673  					// which is O(N) with the high-water mark of the number of
  1674  					// goroutines in this process (in the allgs slice).
  1675  					runtime.Gosched()
  1676  				}
  1677  				if i == 0 {
  1678  					ready.Done()
  1679  				}
  1680  			}
  1681  		}()
  1682  
  1683  		// Short-lived goroutines exercise different code paths (goroutines with
  1684  		// status _Gdead, for instance). This churn doesn't have behavior that
  1685  		// we can test directly, but does help to shake out data races.
  1686  		ready.Add(1)
  1687  		var churn func(i int)
  1688  		churn = func(i int) {
  1689  			SetGoroutineLabels(WithLabels(ctx, Labels(t.Name()+"-churn-i", fmt.Sprint(i))))
  1690  			if i == 0 {
  1691  				ready.Done()
  1692  			} else if i%16 == 0 {
  1693  				// Yield on occasion so this sequence of goroutine launches
  1694  				// doesn't monopolize a P. See issue #52934.
  1695  				runtime.Gosched()
  1696  			}
  1697  			if ctx.Err() == nil {
  1698  				go churn(i + 1)
  1699  			}
  1700  		}
  1701  		go func() {
  1702  			churn(0)
  1703  		}()
  1704  
  1705  		ready.Wait()
  1706  
  1707  		var w [3]bytes.Buffer
  1708  		for i := range w {
  1709  			goroutineProf.WriteTo(&w[i], 0)
  1710  		}
  1711  		for i := range w {
  1712  			p, err := profile.Parse(bytes.NewReader(w[i].Bytes()))
  1713  			if err != nil {
  1714  				t.Errorf("error parsing protobuf profile: %v", err)
  1715  			}
  1716  
  1717  			// High-numbered loop-i goroutines imply that every lower-numbered
  1718  			// loop-i goroutine should be present in the profile too.
  1719  			counts := make(map[string]int)
  1720  			for _, s := range p.Sample {
  1721  				label := s.Label[t.Name()+"-loop-i"]
  1722  				if len(label) > 0 {
  1723  					counts[label[0]]++
  1724  				}
  1725  			}
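        			// The launching goroutine itself carries the most recent
        			// loop-i label (via SetGoroutineLabels above), so the
        			// highest-numbered label may legitimately appear twice.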
  1726  			for j, max := 0, len(counts)-1; j <= max; j++ {
  1727  				n := counts[fmt.Sprint(j)]
  1728  				if n == 1 || (n == 2 && j == max) {
  1729  					continue
  1730  				}
  1731  				t.Errorf("profile #%d has %d goroutines with label loop-i:%d; want 1 (or 2 for the last entry, %d)",
  1732  					i+1, n, j, max)
  1733  				t.Logf("counts %v", counts)
  1734  				break
  1735  			}
  1736  		}
  1737  	}
  1738  
  1739  	runs := 100
  1740  	if testing.Short() {
  1741  		runs = 5
  1742  	}
  1743  	for i := 0; i < runs; i++ {
  1744  		// Run multiple times to shake out data races
  1745  		t.Run("goroutine launches", testLaunches)
  1746  	}
  1747  }
  1748  
  1749  func BenchmarkGoroutine(b *testing.B) {
  1750  	withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
  1751  		return func(b *testing.B) {
  1752  			c := make(chan int)
  1753  			var ready, done sync.WaitGroup
  1754  			defer func() {
  1755  				close(c)
  1756  				done.Wait()
  1757  			}()
  1758  
  1759  			for i := 0; i < n; i++ {
  1760  				ready.Add(1)
  1761  				done.Add(1)
  1762  				go func() {
  1763  					ready.Done()
  1764  					<-c
  1765  					done.Done()
  1766  				}()
  1767  			}
  1768  			// Let goroutines block on channel
  1769  			ready.Wait()
  1770  			for i := 0; i < 5; i++ {
  1771  				runtime.Gosched()
  1772  			}
  1773  
  1774  			fn(b)
  1775  		}
  1776  	}
  1777  
  1778  	withChurn := func(fn func(b *testing.B)) func(b *testing.B) {
  1779  		return func(b *testing.B) {
  1780  			ctx := context.Background()
  1781  			ctx, cancel := context.WithCancel(ctx)
  1782  			defer cancel()
  1783  
  1784  			var ready sync.WaitGroup
  1785  			ready.Add(1)
  1786  			var count int64
  1787  			var churn func(i int)
  1788  			churn = func(i int) {
  1789  				SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
  1790  				atomic.AddInt64(&count, 1)
  1791  				if i == 0 {
  1792  					ready.Done()
  1793  				}
  1794  				if ctx.Err() == nil {
  1795  					go churn(i + 1)
  1796  				}
  1797  			}
  1798  			go func() {
  1799  				churn(0)
  1800  			}()
  1801  			ready.Wait()
  1802  
  1803  			fn(b)
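        			// Report how much goroutine churn happened while the
        			// benchmark body ran, normalized per iteration.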
  1804  			b.ReportMetric(float64(atomic.LoadInt64(&count))/float64(b.N), "concurrent_launches/op")
  1805  		}
  1806  	}
  1807  
  1808  	benchWriteTo := func(b *testing.B) {
  1809  		goroutineProf := Lookup("goroutine")
  1810  		b.ResetTimer()
  1811  		for i := 0; i < b.N; i++ {
  1812  			goroutineProf.WriteTo(io.Discard, 0)
  1813  		}
  1814  		b.StopTimer()
  1815  	}
  1816  
  1817  	benchGoroutineProfile := func(b *testing.B) {
  1818  		p := make([]runtime.StackRecord, 10000)
  1819  		b.ResetTimer()
  1820  		for i := 0; i < b.N; i++ {
  1821  			runtime.GoroutineProfile(p)
  1822  		}
  1823  		b.StopTimer()
  1824  	}
  1825  
  1826  	// Note that some costs of collecting a goroutine profile depend on the
  1827  	// length of the runtime.allgs slice, which never shrinks. Stay within the
  1828  	// race detector's 8k-goroutine limit.
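        	// (The goroutine counts below stay well under that limit while still
        	// showing how the cost scales with the number of goroutines.)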
  1829  	for _, n := range []int{50, 500, 5000} {
  1830  		b.Run(fmt.Sprintf("Profile.WriteTo idle %d", n), withIdle(n, benchWriteTo))
  1831  		b.Run(fmt.Sprintf("Profile.WriteTo churn %d", n), withIdle(n, withChurn(benchWriteTo)))
  1832  		b.Run(fmt.Sprintf("runtime.GoroutineProfile churn %d", n), withIdle(n, withChurn(benchGoroutineProfile)))
  1833  	}
  1834  }
  1835  
  1836  var emptyCallStackTestRun int64
  1837  
  1838  // Issue 18836.
  1839  func TestEmptyCallStack(t *testing.T) {
  1840  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1841  	emptyCallStackTestRun++
  1842  
  1843  	t.Parallel()
  1844  	var buf strings.Builder
  1845  	p := NewProfile(name)
  1846  
  1847  	p.Add("foo", 47674)
  1848  	p.WriteTo(&buf, 1)
  1849  	p.Remove("foo")
  1850  	got := buf.String()
  1851  	prefix := name + " profile: total 1\n"
  1852  	if !strings.HasPrefix(got, prefix) {
  1853  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1854  	}
  1855  	lostevent := "lostProfileEvent"
  1856  	if !strings.Contains(got, lostevent) {
  1857  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1858  	}
  1859  }
  1860  
  1861  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
  1862  // and value and has funcname somewhere in the stack.
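        // For example, the spec "runtime/pprof.cpuHogger;key=value" matches a sample
        // whose labels include key=value and whose stack includes cpuHogger.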
  1863  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1864  	base, kv, ok := strings.Cut(spec, ";")
  1865  	if !ok {
  1866  		panic("no semicolon in key/value spec")
  1867  	}
  1868  	k, v, ok := strings.Cut(kv, "=")
  1869  	if !ok {
  1870  		panic("missing = in key/value spec")
  1871  	}
  1872  	if !contains(labels[k], v) {
  1873  		return false
  1874  	}
  1875  	return stackContains(base, count, stk, labels)
  1876  }
  1877  
  1878  func TestCPUProfileLabel(t *testing.T) {
  1879  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions())
  1880  	testCPUProfile(t, matches, func(dur time.Duration) {
  1881  		Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1882  			cpuHogger(cpuHog1, &salt1, dur)
  1883  		})
  1884  	})
  1885  }
  1886  
  1887  func TestLabelRace(t *testing.T) {
  1888  	testenv.MustHaveParallelism(t)
  1889  	// Test the race detector annotations for synchronization
  1890  	// between setting labels and consuming them from the
  1891  	// profile.
  1892  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil)
  1893  	testCPUProfile(t, matches, func(dur time.Duration) {
  1894  		start := time.Now()
  1895  		var wg sync.WaitGroup
  1896  		for time.Since(start) < dur {
  1897  			var salts [10]int
  1898  			for i := 0; i < 10; i++ {
  1899  				wg.Add(1)
  1900  				go func(j int) {
  1901  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1902  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
  1903  					})
  1904  					wg.Done()
  1905  				}(i)
  1906  			}
  1907  			wg.Wait()
  1908  		}
  1909  	})
  1910  }
  1911  
  1912  func TestGoroutineProfileLabelRace(t *testing.T) {
  1913  	testenv.MustHaveParallelism(t)
  1914  	// Test the race detector annotations for synchronization
  1915  	// between setting labels and consuming them from the
  1916  	// goroutine profile. See issue #50292.
  1917  
  1918  	t.Run("reset", func(t *testing.T) {
  1919  		ctx := context.Background()
  1920  		ctx, cancel := context.WithCancel(ctx)
  1921  		defer cancel()
  1922  
  1923  		go func() {
  1924  			goroutineProf := Lookup("goroutine")
  1925  			for ctx.Err() == nil {
  1926  				var w strings.Builder
  1927  				goroutineProf.WriteTo(&w, 1)
  1928  				prof := w.String()
  1929  				if strings.Contains(prof, "loop-i") {
  1930  					cancel()
  1931  				}
  1932  			}
  1933  		}()
  1934  
  1935  		for i := 0; ctx.Err() == nil; i++ {
  1936  			Do(ctx, Labels("loop-i", fmt.Sprint(i)), func(ctx context.Context) {
  1937  			})
  1938  		}
  1939  	})
  1940  
  1941  	t.Run("churn", func(t *testing.T) {
  1942  		ctx := context.Background()
  1943  		ctx, cancel := context.WithCancel(ctx)
  1944  		defer cancel()
  1945  
  1946  		var ready sync.WaitGroup
  1947  		ready.Add(1)
  1948  		var churn func(i int)
  1949  		churn = func(i int) {
  1950  			SetGoroutineLabels(WithLabels(ctx, Labels("churn-i", fmt.Sprint(i))))
  1951  			if i == 0 {
  1952  				ready.Done()
  1953  			}
  1954  			if ctx.Err() == nil {
  1955  				go churn(i + 1)
  1956  			}
  1957  		}
  1958  		go func() {
  1959  			churn(0)
  1960  		}()
  1961  		ready.Wait()
  1962  
  1963  		goroutineProf := Lookup("goroutine")
  1964  		for i := 0; i < 10; i++ {
  1965  			goroutineProf.WriteTo(io.Discard, 1)
  1966  		}
  1967  	})
  1968  }
  1969  
  1970  // TestLabelSystemstack makes sure that CPU profiler samples of goroutines
  1971  // running on the system stack include the correct pprof labels. See issue #48577.
  1972  func TestLabelSystemstack(t *testing.T) {
  1973  	// Grab and re-set the initial value before continuing to ensure
  1974  	// GOGC doesn't actually change following the test.
  1975  	gogc := debug.SetGCPercent(100)
  1976  	debug.SetGCPercent(gogc)
  1977  
  1978  	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
  1979  	p := testCPUProfile(t, matches, func(dur time.Duration) {
  1980  		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
  1981  			parallelLabelHog(ctx, dur, gogc)
  1982  		})
  1983  	})
  1984  
  1985  	// Two conditions to check:
  1986  	// * labelHog should always be labeled.
  1987  	// * The label should _only_ appear on labelHog and the Do call above.
  1988  	for _, s := range p.Sample {
  1989  		isLabeled := s.Label != nil && contains(s.Label["key"], "value")
  1990  		var (
  1991  			mayBeLabeled     bool
  1992  			mustBeLabeled    string
  1993  			mustNotBeLabeled string
  1994  		)
  1995  		for _, loc := range s.Location {
  1996  			for _, l := range loc.Line {
  1997  				switch l.Function.Name {
  1998  				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
  1999  					mustBeLabeled = l.Function.Name
  2000  				case "runtime/pprof.Do":
  2001  					// Do sets the labels, so samples may
  2002  					// or may not be labeled depending on
  2003  					// which part of the function they are
  2004  					// at.
  2005  					mayBeLabeled = true
  2006  				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
  2007  					// Runtime system goroutines or threads
  2008  					// (such as those identified by
  2009  					// runtime.isSystemGoroutine). These
  2010  					// should never be labeled.
  2011  					mustNotBeLabeled = l.Function.Name
  2012  				case "gogo", "gosave_systemstack_switch", "racecall":
  2013  					// These are context-switch and
  2014  					// race-runtime entry points that we
  2015  					// can't do a full traceback from.
  2016  					// Typically this would be covered by
  2017  					// the runtime check below, but these
  2018  					// symbols don't have the package name.
  2019  					mayBeLabeled = true
  2020  				}
  2021  
  2022  				if strings.HasPrefix(l.Function.Name, "runtime.") {
  2023  					// There are many places in the runtime
  2024  					// where we can't do a full traceback.
  2025  					// Ideally we'd list them all, but
  2026  					// barring that allow anything in the
  2027  					// runtime, unless explicitly excluded
  2028  					// above.
  2029  					mayBeLabeled = true
  2030  				}
  2031  			}
  2032  		}
  2033  		errorStack := func(f string, args ...any) {
  2034  			var buf strings.Builder
  2035  			fprintStack(&buf, s.Location)
  2036  			t.Errorf("%s: %s", fmt.Sprintf(f, args...), buf.String())
  2037  		}
  2038  		if mustBeLabeled != "" && mustNotBeLabeled != "" {
  2039  			errorStack("sample contains both %s, which must be labeled, and %s, which must not be labeled", mustBeLabeled, mustNotBeLabeled)
  2040  			continue
  2041  		}
  2042  		if mustBeLabeled != "" || mustNotBeLabeled != "" {
  2043  			// We found a definitive frame, so mayBeLabeled hints are not relevant.
  2044  			mayBeLabeled = false
  2045  		}
  2046  		if mayBeLabeled {
  2047  			// This sample may or may not be labeled, so there's nothing we can check.
  2048  			continue
  2049  		}
  2050  		if mustBeLabeled != "" && !isLabeled {
  2051  			errorStack("sample must be labeled because of %s, but is not", mustBeLabeled)
  2052  		}
  2053  		if mustNotBeLabeled != "" && isLabeled {
  2054  			errorStack("sample must not be labeled because of %s, but is", mustNotBeLabeled)
  2055  		}
  2056  	}
  2057  }
  2058  
  2059  // labelHog is designed to burn CPU time in a way that causes a high number of
  2060  // CPU samples to end up running on the system stack.
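        // It relies on debug.SetGCPercent, which grabs the heap lock inside the
        // runtime via a systemstack call, so CPU samples of the labeled goroutines
        // land in runtime.systemstack frames.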
  2061  func labelHog(stop chan struct{}, gogc int) {
  2062  	// Regression test for issue 50032. We must give the GC an opportunity to
  2063  	// be initially triggered by a labeled goroutine.
  2064  	runtime.GC()
  2065  
  2066  	for i := 0; ; i++ {
  2067  		select {
  2068  		case <-stop:
  2069  			return
  2070  		default:
  2071  			debug.SetGCPercent(gogc)
  2072  		}
  2073  	}
  2074  }
  2075  
  2076  // parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
  2077  func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
  2078  	var wg sync.WaitGroup
  2079  	stop := make(chan struct{})
  2080  	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
  2081  		wg.Add(1)
  2082  		go func() {
  2083  			defer wg.Done()
  2084  			labelHog(stop, gogc)
  2085  		}()
  2086  	}
  2087  
  2088  	time.Sleep(dur)
  2089  	close(stop)
  2090  	wg.Wait()
  2091  }
  2092  
  2093  // Check that there is no deadlock when the program receives SIGPROF while in
  2094  // a 64-bit atomic's critical section. Used to happen on mips{,le}. See #20146.
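        // On those 32-bit platforms, 64-bit atomics are implemented with an internal
        // lock, so a profiling signal arriving inside the critical section could
        // previously wedge the program.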
  2095  func TestAtomicLoadStore64(t *testing.T) {
  2096  	f, err := os.CreateTemp("", "profatomic")
  2097  	if err != nil {
  2098  		t.Fatalf("TempFile: %v", err)
  2099  	}
  2100  	defer os.Remove(f.Name())
  2101  	defer f.Close()
  2102  
  2103  	if err := StartCPUProfile(f); err != nil {
  2104  		t.Fatal(err)
  2105  	}
  2106  	defer StopCPUProfile()
  2107  
  2108  	var flag uint64
  2109  	done := make(chan bool, 1)
  2110  
  2111  	go func() {
  2112  		for atomic.LoadUint64(&flag) == 0 {
  2113  			runtime.Gosched()
  2114  		}
  2115  		done <- true
  2116  	}()
  2117  	time.Sleep(50 * time.Millisecond)
  2118  	atomic.StoreUint64(&flag, 1)
  2119  	<-done
  2120  }
  2121  
  2122  func TestTracebackAll(t *testing.T) {
  2123  	// With gccgo, if a profiling signal arrives at the wrong time
  2124  	// during traceback, it may crash or hang. See issue #29448.
  2125  	f, err := os.CreateTemp("", "proftraceback")
  2126  	if err != nil {
  2127  		t.Fatalf("TempFile: %v", err)
  2128  	}
  2129  	defer os.Remove(f.Name())
  2130  	defer f.Close()
  2131  
  2132  	if err := StartCPUProfile(f); err != nil {
  2133  		t.Fatal(err)
  2134  	}
  2135  	defer StopCPUProfile()
  2136  
  2137  	ch := make(chan int)
  2138  	defer close(ch)
  2139  
  2140  	count := 10
  2141  	for i := 0; i < count; i++ {
  2142  		go func() {
  2143  			<-ch // block
  2144  		}()
  2145  	}
  2146  
  2147  	N := 10000
  2148  	if testing.Short() {
  2149  		N = 500
  2150  	}
  2151  	buf := make([]byte, 10*1024)
  2152  	for i := 0; i < N; i++ {
  2153  		runtime.Stack(buf, true)
  2154  	}
  2155  }
  2156  
  2157  // TestTryAdd tests the cases that are hard to test with real program execution.
  2158  //
  2159  // For example, the current Go compilers may not always inline functions
  2160  // involved in recursion, but that may not be true in future compilers. This
  2161  // test covers such cases by using fake call sequences and building the profile
  2162  // with translateCPUProfile, defined in proto_test.go.
  2163  func TestTryAdd(t *testing.T) {
  2164  	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
  2165  		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
  2166  	}
  2167  
  2168  	// inlinedCallerDump
  2169  	//   inlinedCalleeDump
  2170  	pcs := make([]uintptr, 2)
  2171  	inlinedCallerDump(pcs)
  2172  	inlinedCallerStack := make([]uint64, 2)
  2173  	for i := range pcs {
  2174  		inlinedCallerStack[i] = uint64(pcs[i])
  2175  	}
  2176  	wrapperPCs := make([]uintptr, 1)
  2177  	inlinedWrapperCallerDump(wrapperPCs)
  2178  
  2179  	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
  2180  		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
  2181  	}
  2182  
  2183  	// recursionChainTop
  2184  	//   recursionChainMiddle
  2185  	//     recursionChainBottom
  2186  	//       recursionChainTop
  2187  	//         recursionChainMiddle
  2188  	//           recursionChainBottom
  2189  	pcs = make([]uintptr, 6)
  2190  	recursionChainTop(1, pcs)
  2191  	recursionStack := make([]uint64, len(pcs))
  2192  	for i := range pcs {
  2193  		recursionStack[i] = uint64(pcs[i])
  2194  	}
  2195  
  2196  	period := int64(2000 * 1000) // 1/500 * 1e9 nanoseconds; matches hz = 500 below.
  2197  
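        	// Each test input below follows the record format consumed by
        	// profileBuilder.addCPUData: [record length in words, a timestamp
        	// slot (always 0 in these inputs), count, stack PCs...]. The leading
        	// [3, 0, 500] record carries the sampling rate (hz = 500) in its
        	// count field, and a record with count == 0 and a single stack word
        	// is an overflow entry whose "PC" actually holds the number of lost
        	// events (see the bug38096 case below).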
  2198  	testCases := []struct {
  2199  		name        string
  2200  		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
  2201  		count       int               // number of records in input.
  2202  		wantLocs    [][]string        // ordered location entries with function names.
  2203  		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
  2204  	}{{
  2205  		// Sanity test for a normal, complete stack trace.
  2206  		name: "full_stack_trace",
  2207  		input: []uint64{
  2208  			3, 0, 500, // hz = 500. Must match the period.
  2209  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  2210  		},
  2211  		count: 2,
  2212  		wantLocs: [][]string{
  2213  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  2214  		},
  2215  		wantSamples: []*profile.Sample{
  2216  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2217  		},
  2218  	}, {
  2219  		name: "bug35538",
  2220  		input: []uint64{
  2221  			3, 0, 500, // hz = 500. Must match the period.
  2222  			// Fake frame: tryAdd will have inlinedCallerDump
  2223  			// (stack[1]) on the deck when it encounters the next
  2224  			// inline function. It should accept this.
  2225  			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
  2226  			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
  2227  		},
  2228  		count:    3,
  2229  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2230  		wantSamples: []*profile.Sample{
  2231  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  2232  			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
  2233  		},
  2234  	}, {
  2235  		name: "bug38096",
  2236  		input: []uint64{
  2237  			3, 0, 500, // hz = 500. Must match the period.
  2238  			// count (data[2]) == 0 && len(stk) == 1 is an overflow
  2239  			// entry. The "stk" entry is actually the count.
  2240  			4, 0, 0, 4242,
  2241  		},
  2242  		count:    2,
  2243  		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
  2244  		wantSamples: []*profile.Sample{
  2245  			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
  2246  		},
  2247  	}, {
  2248  		// If a function is directly called recursively then it must
  2249  		// not be inlined in the caller.
  2250  		//
  2251  		// N.B. We're generating an impossible profile here, with a
  2252  		// recursive inlinedCalleeDump call. This simulates a non-Go
  2253  		// function that looks like an inlined Go function except for
  2254  		// its recursive property. See pcDeck.tryAdd.
  2255  		name: "directly_recursive_func_is_not_inlined",
  2256  		input: []uint64{
  2257  			3, 0, 500, // hz = 500. Must match the period.
  2258  			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
  2259  			4, 0, 40, inlinedCallerStack[0],
  2260  		},
  2261  		count: 3,
  2262  		// inlinedCallerDump shows up here because
  2263  		// runtime_expandFinalInlineFrame adds it to the stack frame.
  2264  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
  2265  		wantSamples: []*profile.Sample{
  2266  			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
  2267  			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
  2268  		},
  2269  	}, {
  2270  		name: "recursion_chain_inline",
  2271  		input: []uint64{
  2272  			3, 0, 500, // hz = 500. Must match the period.
  2273  			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
  2274  		},
  2275  		count: 2,
  2276  		wantLocs: [][]string{
  2277  			{"runtime/pprof.recursionChainBottom"},
  2278  			{
  2279  				"runtime/pprof.recursionChainMiddle",
  2280  				"runtime/pprof.recursionChainTop",
  2281  				"runtime/pprof.recursionChainBottom",
  2282  			},
  2283  			{
  2284  				"runtime/pprof.recursionChainMiddle",
  2285  				"runtime/pprof.recursionChainTop",
  2286  				"runtime/pprof.TestTryAdd", // inlined into the test.
  2287  			},
  2288  		},
  2289  		wantSamples: []*profile.Sample{
  2290  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
  2291  		},
  2292  	}, {
  2293  		name: "truncated_stack_trace_later",
  2294  		input: []uint64{
  2295  			3, 0, 500, // hz = 500. Must match the period.
  2296  			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
  2297  			4, 0, 60, inlinedCallerStack[0],
  2298  		},
  2299  		count:    3,
  2300  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2301  		wantSamples: []*profile.Sample{
  2302  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2303  			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
  2304  		},
  2305  	}, {
  2306  		name: "truncated_stack_trace_first",
  2307  		input: []uint64{
  2308  			3, 0, 500, // hz = 500. Must match the period.
  2309  			4, 0, 70, inlinedCallerStack[0],
  2310  			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
  2311  		},
  2312  		count:    3,
  2313  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2314  		wantSamples: []*profile.Sample{
  2315  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2316  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
  2317  		},
  2318  	}, {
  2319  		// We can recover the inlined caller from a truncated stack.
  2320  		name: "truncated_stack_trace_only",
  2321  		input: []uint64{
  2322  			3, 0, 500, // hz = 500. Must match the period.
  2323  			4, 0, 70, inlinedCallerStack[0],
  2324  		},
  2325  		count:    2,
  2326  		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
  2327  		wantSamples: []*profile.Sample{
  2328  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2329  		},
  2330  	}, {
  2331  		// The same location is used for duplicated stacks.
  2332  		name: "truncated_stack_trace_twice",
  2333  		input: []uint64{
  2334  			3, 0, 500, // hz = 500. Must match the period.
  2335  			4, 0, 70, inlinedCallerStack[0],
  2336  			// Fake frame: add a fake call to
  2337  			// inlinedCallerDump to prevent this sample
  2338  			// from getting merged into the one above.
  2339  			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
  2340  		},
  2341  		count: 3,
  2342  		wantLocs: [][]string{
  2343  			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
  2344  			{"runtime/pprof.inlinedCallerDump"},
  2345  		},
  2346  		wantSamples: []*profile.Sample{
  2347  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  2348  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
  2349  		},
  2350  	}, {
  2351  		name: "expand_wrapper_function",
  2352  		input: []uint64{
  2353  			3, 0, 500, // hz = 500. Must match the period.
  2354  			4, 0, 50, uint64(wrapperPCs[0]),
  2355  		},
  2356  		count:    2,
  2357  		wantLocs: [][]string{{"runtime/pprof.inlineWrapper.dump"}},
  2358  		wantSamples: []*profile.Sample{
  2359  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  2360  		},
  2361  	}}
  2362  
  2363  	for _, tc := range testCases {
  2364  		t.Run(tc.name, func(t *testing.T) {
  2365  			p, err := translateCPUProfile(tc.input, tc.count)
  2366  			if err != nil {
  2367  				t.Fatalf("translating profile: %v", err)
  2368  			}
  2369  			t.Logf("Profile: %v\n", p)
  2370  
  2371  			// One location entry with all inlined functions.
  2372  			var gotLoc [][]string
  2373  			for _, loc := range p.Location {
  2374  				var names []string
  2375  				for _, line := range loc.Line {
  2376  					names = append(names, line.Function.Name)
  2377  				}
  2378  				gotLoc = append(gotLoc, names)
  2379  			}
  2380  			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
  2381  				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
  2382  			}
  2383  			// All samples should point to one location.
  2384  			var gotSamples []*profile.Sample
  2385  			for _, sample := range p.Sample {
  2386  				var locs []*profile.Location
  2387  				for _, loc := range sample.Location {
  2388  					locs = append(locs, &profile.Location{ID: loc.ID})
  2389  				}
  2390  				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
  2391  			}
  2392  			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
  2393  				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
  2394  			}
  2395  		})
  2396  	}
  2397  }
  2398  
  2399  func TestTimeVDSO(t *testing.T) {
  2400  	// Test that time functions have the right stack trace. In particular,
  2401  	// time.now should not appear to call itself.
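        	// (On Linux, time.now reads the clock through the vDSO; a truncated
        	// traceback there could plausibly make time.now appear to call
        	// itself, which is what the check below guards against.)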
  2402  
  2403  	if runtime.GOOS == "android" {
  2404  		// Flaky on Android, issue 48655. VDSO may not be enabled.
  2405  		testenv.SkipFlaky(t, 48655)
  2406  	}
  2407  
  2408  	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
  2409  	p := testCPUProfile(t, matches, func(dur time.Duration) {
  2410  		t0 := time.Now()
  2411  		for {
  2412  			t := time.Now()
  2413  			if t.Sub(t0) >= dur {
  2414  				return
  2415  			}
  2416  		}
  2417  	})
  2418  
  2419  	// Check for recursive time.now sample.
  2420  	for _, sample := range p.Sample {
  2421  		var seenNow bool
  2422  		for _, loc := range sample.Location {
  2423  			for _, line := range loc.Line {
  2424  				if line.Function.Name == "time.now" {
  2425  					if seenNow {
  2426  						t.Fatalf("unexpected recursive time.now")
  2427  					}
  2428  					seenNow = true
  2429  				}
  2430  			}
  2431  		}
  2432  	}
  2433  }
  2434  