Source file src/runtime/mprof.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Malloc profiling.
     6  // Patterned after tcmalloc's algorithms; shorter code.
     7  
     8  package runtime
     9  
    10  import (
    11  	"internal/abi"
    12  	"internal/goarch"
    13  	"internal/profilerecord"
    14  	"internal/runtime/atomic"
    15  	"internal/runtime/sys"
    16  	"unsafe"
    17  )
    18  
    19  // NOTE(rsc): Everything here could use cas if contention became an issue.
    20  var (
    21  	// profInsertLock protects changes to the start of all *bucket linked lists
    22  	profInsertLock mutex
    23  	// profBlockLock protects the contents of every blockRecord struct
    24  	profBlockLock mutex
    25  	// profMemActiveLock protects the active field of every memRecord struct
    26  	profMemActiveLock mutex
    27  	// profMemFutureLock is a set of locks that protect the respective elements
    28  	// of the future array of every memRecord struct
    29  	profMemFutureLock [len(memRecord{}.future)]mutex
    30  )
    31  
    32  // All memory allocations are local and do not escape outside of the profiler.
    33  // The profiler is forbidden from referring to garbage-collected memory.
    34  
    35  const (
    36  	// profile types
    37  	memProfile bucketType = 1 + iota
    38  	blockProfile
    39  	mutexProfile
    40  
    41  	// size of bucket hash table
    42  	buckHashSize = 179999
    43  
    44  	// maxSkip is to account for deferred inline expansion
    45  	// when using frame pointer unwinding. We record the stack
    46  	// with "physical" frame pointers but handle skipping "logical"
    47  	// frames at some point after collecting the stack. So
    48  	// we need extra space in order to avoid getting fewer than the
    49  	// desired maximum number of frames after expansion.
    50  	// This should be at least as large as the largest skip value
     51  	// used for profiling; otherwise stacks may be truncated inconsistently.
    52  	maxSkip = 6
    53  
    54  	// maxProfStackDepth is the highest valid value for debug.profstackdepth.
    55  	// It's used for the bucket.stk func.
    56  	// TODO(fg): can we get rid of this?
    57  	maxProfStackDepth = 1024
    58  )
    59  
    60  type bucketType int
    61  
    62  // A bucket holds per-call-stack profiling information.
    63  // The representation is a bit sleazy, inherited from C.
    64  // This struct defines the bucket header. It is followed in
    65  // memory by the stack words and then the actual record
    66  // data, either a memRecord or a blockRecord.
    67  //
    68  // Per-call-stack profiling information.
    69  // Lookup by hashing call stack into a linked-list hash table.
    70  //
    71  // None of the fields in this bucket header are modified after
    72  // creation, including its next and allnext links.
    73  //
    74  // No heap pointers.
    75  type bucket struct {
    76  	_       sys.NotInHeap
    77  	next    *bucket
    78  	allnext *bucket
    79  	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
    80  	hash    uintptr
    81  	size    uintptr
    82  	nstk    uintptr
    83  }
    84  
    85  // A memRecord is the bucket data for a bucket of type memProfile,
    86  // part of the memory profile.
    87  type memRecord struct {
    88  	// The following complex 3-stage scheme of stats accumulation
    89  	// is required to obtain a consistent picture of mallocs and frees
    90  	// for some point in time.
    91  	// The problem is that mallocs come in real time, while frees
    92  	// come only after a GC during concurrent sweeping. So if we would
    93  	// naively count them, we would get a skew toward mallocs.
    94  	//
    95  	// Hence, we delay information to get consistent snapshots as
    96  	// of mark termination. Allocations count toward the next mark
    97  	// termination's snapshot, while sweep frees count toward the
    98  	// previous mark termination's snapshot:
    99  	//
   100  	//              MT          MT          MT          MT
   101  	//             .·|         .·|         .·|         .·|
   102  	//          .·˙  |      .·˙  |      .·˙  |      .·˙  |
   103  	//       .·˙     |   .·˙     |   .·˙     |   .·˙     |
   104  	//    .·˙        |.·˙        |.·˙        |.·˙        |
   105  	//
   106  	//       alloc → ▲ ← free
   107  	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
   108  	//       C+2     →    C+1    →  C
   109  	//
   110  	//                   alloc → ▲ ← free
   111  	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
   112  	//                   C+2     →    C+1    →  C
   113  	//
   114  	// Since we can't publish a consistent snapshot until all of
   115  	// the sweep frees are accounted for, we wait until the next
   116  	// mark termination ("MT" above) to publish the previous mark
   117  	// termination's snapshot ("P" above). To do this, allocation
   118  	// and free events are accounted to *future* heap profile
   119  	// cycles ("C+n" above) and we only publish a cycle once all
    120  	// of the events from that cycle are done. Specifically:
   121  	//
   122  	// Mallocs are accounted to cycle C+2.
   123  	// GC frees (done during sweeping) are accounted to cycle C+1.
   124  	//
   125  	// After mark termination, we increment the global heap
   126  	// profile cycle counter and accumulate the stats from cycle C
   127  	// into the active profile.
   128  
   129  	// active is the currently published profile. A profiling
    130  	// cycle can be accumulated into active once it's complete.
   131  	active memRecordCycle
   132  
   133  	// future records the profile events we're counting for cycles
    134  	// that have not yet been published. This is a ring buffer
   135  	// indexed by the global heap profile cycle C and stores
   136  	// cycles C, C+1, and C+2. Unlike active, these counts are
   137  	// only for a single cycle; they are not cumulative across
   138  	// cycles.
   139  	//
   140  	// We store cycle C here because there's a window between when
   141  	// C becomes the active cycle and when we've flushed it to
   142  	// active.
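         	//
         	// Illustrative example (derived from mProf_Malloc, mProf_Free, and
         	// mProf_FlushLocked below): if the current global cycle is C=7, new
         	// mallocs are counted in future[(7+2)%3] = future[0], sweep frees in
         	// future[(7+1)%3] = future[2], and the next flush accumulates
         	// future[7%3] = future[1] into active.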
   143  	future [3]memRecordCycle
   144  }
   145  
    146  // A memRecordCycle holds the allocation and free counts for a single heap profile cycle.
   147  type memRecordCycle struct {
   148  	allocs, frees uintptr
   149  }
   150  
   151  // add accumulates b into a. It does not zero b.
   152  func (a *memRecordCycle) add(b *memRecordCycle) {
   153  	a.allocs += b.allocs
   154  	a.frees += b.frees
   155  }
   156  
   157  // A blockRecord is the bucket data for a bucket of type blockProfile,
   158  // which is used in blocking and mutex profiles.
   159  type blockRecord struct {
   160  	count  float64
   161  	cycles int64
   162  }
   163  
   164  var (
   165  	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
   166  	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
   167  	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
   168  	buckhash atomic.UnsafePointer // *buckhashArray
   169  
   170  	mProfCycle mProfCycleHolder
   171  )
   172  
   173  type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket
   174  
   175  const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
   176  
   177  // mProfCycleHolder holds the global heap profile cycle number (wrapped at
   178  // mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
   179  // indicate whether future[cycle] in all buckets has been queued to flush into
   180  // the active profile.
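         //
         // For illustration: a stored value of 11 (binary 1011) encodes cycle 5 with
         // the flushed flag set, while 10 (binary 1010) encodes cycle 5 with the flag
         // clear.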
   181  type mProfCycleHolder struct {
   182  	value atomic.Uint32
   183  }
   184  
   185  // read returns the current cycle count.
   186  func (c *mProfCycleHolder) read() (cycle uint32) {
   187  	v := c.value.Load()
   188  	cycle = v >> 1
   189  	return cycle
   190  }
   191  
   192  // setFlushed sets the flushed flag. It returns the current cycle count and the
   193  // previous value of the flushed flag.
   194  func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
   195  	for {
   196  		prev := c.value.Load()
   197  		cycle = prev >> 1
   198  		alreadyFlushed = (prev & 0x1) != 0
   199  		next := prev | 0x1
   200  		if c.value.CompareAndSwap(prev, next) {
   201  			return cycle, alreadyFlushed
   202  		}
   203  	}
   204  }
   205  
   206  // increment increases the cycle count by one, wrapping the value at
   207  // mProfCycleWrap. It clears the flushed flag.
   208  func (c *mProfCycleHolder) increment() {
   209  	// We explicitly wrap mProfCycle rather than depending on
   210  	// uint wraparound because the memRecord.future ring does not
   211  	// itself wrap at a power of two.
   212  	for {
   213  		prev := c.value.Load()
   214  		cycle := prev >> 1
   215  		cycle = (cycle + 1) % mProfCycleWrap
   216  		next := cycle << 1
   217  		if c.value.CompareAndSwap(prev, next) {
   218  			break
   219  		}
   220  	}
   221  }
   222  
   223  // newBucket allocates a bucket with the given type and number of stack entries.
   224  func newBucket(typ bucketType, nstk int) *bucket {
   225  	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
   226  	switch typ {
   227  	default:
   228  		throw("invalid profile bucket type")
   229  	case memProfile:
   230  		size += unsafe.Sizeof(memRecord{})
   231  	case blockProfile, mutexProfile:
   232  		size += unsafe.Sizeof(blockRecord{})
   233  	}
   234  
   235  	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
   236  	b.typ = typ
   237  	b.nstk = uintptr(nstk)
   238  	return b
   239  }
   240  
   241  // stk returns the slice in b holding the stack. The caller can assume that the
   242  // backing array is immutable.
   243  func (b *bucket) stk() []uintptr {
   244  	stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
   245  	if b.nstk > maxProfStackDepth {
   246  		// prove that slicing works; otherwise a failure requires a P
   247  		throw("bad profile stack count")
   248  	}
   249  	return stk[:b.nstk:b.nstk]
   250  }
   251  
   252  // mp returns the memRecord associated with the memProfile bucket b.
   253  func (b *bucket) mp() *memRecord {
   254  	if b.typ != memProfile {
   255  		throw("bad use of bucket.mp")
   256  	}
   257  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   258  	return (*memRecord)(data)
   259  }
   260  
   261  // bp returns the blockRecord associated with the blockProfile bucket b.
   262  func (b *bucket) bp() *blockRecord {
   263  	if b.typ != blockProfile && b.typ != mutexProfile {
   264  		throw("bad use of bucket.bp")
   265  	}
   266  	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
   267  	return (*blockRecord)(data)
   268  }
   269  
    270  // stkbucket returns the bucket for stk, allocating a new bucket if needed and alloc is true.
   271  func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
   272  	bh := (*buckhashArray)(buckhash.Load())
   273  	if bh == nil {
   274  		lock(&profInsertLock)
   275  		// check again under the lock
   276  		bh = (*buckhashArray)(buckhash.Load())
   277  		if bh == nil {
   278  			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys, "profiler hash buckets"))
   279  			if bh == nil {
   280  				throw("runtime: cannot allocate memory")
   281  			}
   282  			buckhash.StoreNoWB(unsafe.Pointer(bh))
   283  		}
   284  		unlock(&profInsertLock)
   285  	}
   286  
   287  	// Hash stack.
   288  	var h uintptr
   289  	for _, pc := range stk {
   290  		h += pc
   291  		h += h << 10
   292  		h ^= h >> 6
   293  	}
   294  	// hash in size
   295  	h += size
   296  	h += h << 10
   297  	h ^= h >> 6
   298  	// finalize
   299  	h += h << 3
   300  	h ^= h >> 11
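         	// (The mixing above resembles a Jenkins-style one-at-a-time hash.)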
   301  
   302  	i := int(h % buckHashSize)
   303  	// first check optimistically, without the lock
   304  	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
   305  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   306  			return b
   307  		}
   308  	}
   309  
   310  	if !alloc {
   311  		return nil
   312  	}
   313  
   314  	lock(&profInsertLock)
   315  	// check again under the insertion lock
   316  	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
   317  		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
   318  			unlock(&profInsertLock)
   319  			return b
   320  		}
   321  	}
   322  
   323  	// Create new bucket.
   324  	b := newBucket(typ, len(stk))
   325  	copy(b.stk(), stk)
   326  	b.hash = h
   327  	b.size = size
   328  
   329  	var allnext *atomic.UnsafePointer
   330  	if typ == memProfile {
   331  		allnext = &mbuckets
   332  	} else if typ == mutexProfile {
   333  		allnext = &xbuckets
   334  	} else {
   335  		allnext = &bbuckets
   336  	}
   337  
   338  	b.next = (*bucket)(bh[i].Load())
   339  	b.allnext = (*bucket)(allnext.Load())
   340  
   341  	bh[i].StoreNoWB(unsafe.Pointer(b))
   342  	allnext.StoreNoWB(unsafe.Pointer(b))
   343  
   344  	unlock(&profInsertLock)
   345  	return b
   346  }
   347  
   348  func eqslice(x, y []uintptr) bool {
   349  	if len(x) != len(y) {
   350  		return false
   351  	}
   352  	for i, xi := range x {
   353  		if xi != y[i] {
   354  			return false
   355  		}
   356  	}
   357  	return true
   358  }
   359  
   360  // mProf_NextCycle publishes the next heap profile cycle and creates a
   361  // fresh heap profile cycle. This operation is fast and can be done
   362  // during STW. The caller must call mProf_Flush before calling
   363  // mProf_NextCycle again.
   364  //
   365  // This is called by mark termination during STW so allocations and
   366  // frees after the world is started again count towards a new heap
   367  // profiling cycle.
   368  func mProf_NextCycle() {
   369  	mProfCycle.increment()
   370  }
   371  
   372  // mProf_Flush flushes the events from the current heap profiling
   373  // cycle into the active profile. After this it is safe to start a new
   374  // heap profiling cycle with mProf_NextCycle.
   375  //
   376  // This is called by GC after mark termination starts the world. In
   377  // contrast with mProf_NextCycle, this is somewhat expensive, but safe
   378  // to do concurrently.
   379  func mProf_Flush() {
   380  	cycle, alreadyFlushed := mProfCycle.setFlushed()
   381  	if alreadyFlushed {
   382  		return
   383  	}
   384  
   385  	index := cycle % uint32(len(memRecord{}.future))
   386  	lock(&profMemActiveLock)
   387  	lock(&profMemFutureLock[index])
   388  	mProf_FlushLocked(index)
   389  	unlock(&profMemFutureLock[index])
   390  	unlock(&profMemActiveLock)
   391  }
   392  
   393  // mProf_FlushLocked flushes the events from the heap profiling cycle at index
   394  // into the active profile. The caller must hold the lock for the active profile
   395  // (profMemActiveLock) and for the profiling cycle at index
   396  // (profMemFutureLock[index]).
   397  func mProf_FlushLocked(index uint32) {
   398  	assertLockHeld(&profMemActiveLock)
   399  	assertLockHeld(&profMemFutureLock[index])
   400  	head := (*bucket)(mbuckets.Load())
   401  	for b := head; b != nil; b = b.allnext {
   402  		mp := b.mp()
   403  
   404  		// Flush cycle C into the published profile and clear
   405  		// it for reuse.
   406  		mpc := &mp.future[index]
   407  		mp.active.add(mpc)
   408  		*mpc = memRecordCycle{}
   409  	}
   410  }
   411  
   412  // mProf_PostSweep records that all sweep frees for this GC cycle have
   413  // completed. This has the effect of publishing the heap profile
   414  // snapshot as of the last mark termination without advancing the heap
   415  // profile cycle.
   416  func mProf_PostSweep() {
   417  	// Flush cycle C+1 to the active profile so everything as of
   418  	// the last mark termination becomes visible. *Don't* advance
   419  	// the cycle, since we're still accumulating allocs in cycle
   420  	// C+2, which have to become C+1 in the next mark termination
   421  	// and so on.
   422  	cycle := mProfCycle.read() + 1
   423  
   424  	index := cycle % uint32(len(memRecord{}.future))
   425  	lock(&profMemActiveLock)
   426  	lock(&profMemFutureLock[index])
   427  	mProf_FlushLocked(index)
   428  	unlock(&profMemFutureLock[index])
   429  	unlock(&profMemActiveLock)
   430  }
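
         // Taken together (a summary drawn from the comments above, not new behavior):
         // during one GC cycle the runtime calls mProf_NextCycle in the
         // mark-termination STW, mProf_Flush shortly after the world is restarted,
         // and mProf_PostSweep once all sweep frees for that cycle have completed.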
   431  
   432  // Called by malloc to record a profiled block.
   433  func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
   434  	if mp.profStack == nil {
   435  		// mp.profStack is nil if we happen to sample an allocation during the
   436  		// initialization of mp. This case is rare, so we just ignore such
   437  		// allocations. Change MemProfileRate to 1 if you need to reproduce such
   438  		// cases for testing purposes.
   439  		return
   440  	}
   441  	// Only use the part of mp.profStack we need and ignore the extra space
   442  	// reserved for delayed inline expansion with frame pointer unwinding.
   443  	nstk := callers(3, mp.profStack[:debug.profstackdepth+2])
   444  	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
   445  
   446  	b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
   447  	mr := b.mp()
   448  	mpc := &mr.future[index]
   449  
   450  	lock(&profMemFutureLock[index])
   451  	mpc.allocs++
   452  	unlock(&profMemFutureLock[index])
   453  
   454  	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
   455  	// the profiler locks. This reduces potential contention and chances of
   456  	// deadlocks. Since the object must be alive during the call to
   457  	// mProf_Malloc, it's fine to do this non-atomically.
   458  	systemstack(func() {
   459  		setprofilebucket(p, b)
   460  	})
   461  }
   462  
   463  // Called when freeing a profiled block.
   464  func mProf_Free(b *bucket) {
   465  	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
   466  
   467  	mp := b.mp()
   468  	mpc := &mp.future[index]
   469  
   470  	lock(&profMemFutureLock[index])
   471  	mpc.frees++
   472  	unlock(&profMemFutureLock[index])
   473  }
   474  
   475  var blockprofilerate uint64 // in CPU ticks
   476  
   477  // SetBlockProfileRate controls the fraction of goroutine blocking events
   478  // that are reported in the blocking profile. The profiler aims to sample
   479  // an average of one blocking event per rate nanoseconds spent blocked.
   480  //
   481  // To include every blocking event in the profile, pass rate = 1.
   482  // To turn off profiling entirely, pass rate <= 0.
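         //
         // For example, passing rate = 1000 samples, on average, one blocking event
         // per 1000 nanoseconds (1µs) spent blocked.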
   483  func SetBlockProfileRate(rate int) {
   484  	var r int64
   485  	if rate <= 0 {
   486  		r = 0 // disable profiling
   487  	} else if rate == 1 {
   488  		r = 1 // profile everything
   489  	} else {
   490  		// convert ns to cycles, use float64 to prevent overflow during multiplication
   491  		r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
   492  		if r == 0 {
   493  			r = 1
   494  		}
   495  	}
   496  
   497  	atomic.Store64(&blockprofilerate, uint64(r))
   498  }
   499  
   500  func blockevent(cycles int64, skip int) {
   501  	if cycles <= 0 {
   502  		cycles = 1
   503  	}
   504  
   505  	rate := int64(atomic.Load64(&blockprofilerate))
   506  	if blocksampled(cycles, rate) {
   507  		saveblockevent(cycles, rate, skip+1, blockProfile)
   508  	}
   509  }
   510  
   511  // blocksampled returns true for all events where cycles >= rate. Shorter
   512  // events have a cycles/rate random chance of returning true.
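         // For example, with rate = 100 an event of 250 cycles is always sampled,
         // while an event of 30 cycles is sampled with probability of roughly 30/100.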
   513  func blocksampled(cycles, rate int64) bool {
   514  	if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) {
   515  		return false
   516  	}
   517  	return true
   518  }
   519  
   520  // saveblockevent records a profile event of the type specified by which.
   521  // cycles is the quantity associated with this event and rate is the sampling rate,
   522  // used to adjust the cycles value in the manner determined by the profile type.
   523  // skip is the number of frames to omit from the traceback associated with the event.
   524  // The traceback will be recorded from the stack of the goroutine associated with the current m.
   525  // skip should be positive if this event is recorded from the current stack
   526  // (e.g. when this is not called from a system stack)
   527  func saveblockevent(cycles, rate int64, skip int, which bucketType) {
   528  	if debug.profstackdepth == 0 {
   529  		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
   530  		// can't record a stack trace.
   531  		return
   532  	}
   533  	if skip > maxSkip {
   534  		print("requested skip=", skip)
   535  		throw("invalid skip value")
   536  	}
   537  	gp := getg()
   538  	mp := acquirem() // we must not be preempted while accessing profstack
   539  
   540  	var nstk int
   541  	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
   542  		if gp.m.curg == nil || gp.m.curg == gp {
   543  			nstk = callers(skip, mp.profStack)
   544  		} else {
   545  			nstk = gcallers(gp.m.curg, skip, mp.profStack)
   546  		}
   547  	} else {
   548  		if gp.m.curg == nil || gp.m.curg == gp {
   549  			if skip > 0 {
   550  				// We skip one fewer frame than the provided value for frame
   551  				// pointer unwinding because the skip value includes the current
   552  				// frame, whereas the saved frame pointer will give us the
   553  				// caller's return address first (so, not including
   554  				// saveblockevent)
   555  				skip -= 1
   556  			}
   557  			nstk = fpTracebackPartialExpand(skip, unsafe.Pointer(getfp()), mp.profStack)
   558  		} else {
   559  			mp.profStack[0] = gp.m.curg.sched.pc
   560  			nstk = 1 + fpTracebackPartialExpand(skip, unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[1:])
   561  		}
   562  	}
   563  
   564  	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
   565  	releasem(mp)
   566  }
   567  
   568  // fpTracebackPartialExpand records a call stack obtained starting from fp.
   569  // This function will skip the given number of frames, properly accounting for
   570  // inlining, and save remaining frames as "physical" return addresses. The
   571  // consumer should later use CallersFrames or similar to expand inline frames.
   572  func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int {
   573  	var n int
   574  	lastFuncID := abi.FuncIDNormal
   575  	skipOrAdd := func(retPC uintptr) bool {
   576  		if skip > 0 {
   577  			skip--
   578  		} else if n < len(pcBuf) {
   579  			pcBuf[n] = retPC
   580  			n++
   581  		}
   582  		return n < len(pcBuf)
   583  	}
   584  	for n < len(pcBuf) && fp != nil {
   585  		// return addr sits one word above the frame pointer
   586  		pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
   587  
   588  		if skip > 0 {
   589  			callPC := pc - 1
   590  			fi := findfunc(callPC)
   591  			u, uf := newInlineUnwinder(fi, callPC)
   592  			for ; uf.valid(); uf = u.next(uf) {
   593  				sf := u.srcFunc(uf)
   594  				if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
   595  					// ignore wrappers
   596  				} else if more := skipOrAdd(uf.pc + 1); !more {
   597  					return n
   598  				}
   599  				lastFuncID = sf.funcID
   600  			}
   601  		} else {
   602  			// We've skipped the desired number of frames, so no need
   603  			// to perform further inline expansion now.
   604  			pcBuf[n] = pc
   605  			n++
   606  		}
   607  
   608  		// follow the frame pointer to the next one
   609  		fp = unsafe.Pointer(*(*uintptr)(fp))
   610  	}
   611  	return n
   612  }
   613  
   614  // mLockProfile holds information about the runtime-internal lock contention
   615  // experienced and caused by this M, to report in metrics and profiles.
   616  //
   617  // These measurements are subject to some notable constraints: First, the fast
   618  // path for lock and unlock must remain very fast, with a minimal critical
   619  // section. Second, the critical section during contention has to remain small
   620  // too, so low levels of contention are less likely to snowball into large ones.
   621  // The reporting code cannot acquire new locks until the M has released all
   622  // other locks, which means no memory allocations and encourages use of
   623  // (temporary) M-local storage.
   624  //
   625  // The M has space for storing one call stack that caused contention, and the
   626  // magnitude of that contention. It also has space to store the magnitude of
   627  // additional contention the M caused, since it might encounter several
   628  // contention events before it releases all of its locks and is thus able to
   629  // transfer the locally buffered call stack and magnitude into the profile.
   630  //
   631  // The M collects the call stack when it unlocks the contended lock. The
   632  // traceback takes place outside of the lock's critical section.
   633  //
   634  // The profile for contention on sync.Mutex blames the caller of Unlock for the
   635  // amount of contention experienced by the callers of Lock which had to wait.
   636  // When there are several critical sections, this allows identifying which of
   637  // them is responsible. We must match that reporting behavior for contention on
   638  // runtime-internal locks.
   639  //
   640  // When the M unlocks its last mutex, it transfers the locally buffered call
   641  // stack and magnitude into the profile. As part of that step, it also transfers
   642  // any "additional contention" time to the profile. Any lock contention that it
   643  // experiences while adding samples to the profile will be recorded later as
   644  // "additional contention" and not include a call stack, to avoid an echo.
   645  type mLockProfile struct {
   646  	waitTime   atomic.Int64 // (nanotime) total time this M has spent waiting in runtime.lockWithRank. Read by runtime/metrics.
   647  	stack      []uintptr    // call stack at the point of this M's unlock call, when other Ms had to wait
   648  	cycles     int64        // (cputicks) cycles attributable to "stack"
   649  	cyclesLost int64        // (cputicks) contention for which we weren't able to record a call stack
   650  	haveStack  bool         // stack and cycles are to be added to the mutex profile (even if cycles is 0)
   651  	disabled   bool         // attribute all time to "lost"
   652  }
   653  
   654  func (prof *mLockProfile) start() int64 {
   655  	if cheaprandn(gTrackingPeriod) == 0 {
   656  		return nanotime()
   657  	}
   658  	return 0
   659  }
   660  
   661  func (prof *mLockProfile) end(start int64) {
   662  	if start != 0 {
   663  		prof.waitTime.Add((nanotime() - start) * gTrackingPeriod)
   664  	}
   665  }
   666  
   667  // recordUnlock prepares data for later addition to the mutex contention
   668  // profile. The M may hold arbitrary locks during this call.
   669  //
   670  // From unlock2, we might not be holding a p in this code.
   671  //
   672  //go:nowritebarrierrec
   673  func (prof *mLockProfile) recordUnlock(cycles int64) {
   674  	if cycles < 0 {
   675  		cycles = 0
   676  	}
   677  
   678  	if prof.disabled {
   679  		// We're experiencing contention while attempting to report contention.
   680  		// Make a note of its magnitude, but don't allow it to be the sole cause
   681  		// of another contention report.
   682  		prof.cyclesLost += cycles
   683  		return
   684  	}
   685  
   686  	if prev := prof.cycles; prev > 0 {
   687  		// We can only store one call stack for runtime-internal lock contention
   688  		// on this M, and we've already got one. Decide which should stay, and
   689  		// add the other to the report for runtime._LostContendedRuntimeLock.
   690  		if cycles == 0 {
   691  			return
   692  		}
    693  		prevScore := uint64(cheaprand64()) % uint64(prev)
    694  		thisScore := uint64(cheaprand64()) % uint64(cycles)
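         		// Whichever event wins this comparison keeps its call stack, so the
         		// retained sample is biased toward events with larger magnitudes.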
   695  		if prevScore > thisScore {
   696  			prof.cyclesLost += cycles
   697  			return
   698  		} else {
   699  			prof.cyclesLost += prev
   700  		}
   701  	}
   702  	prof.captureStack()
   703  	prof.cycles = cycles
   704  }
   705  
   706  func (prof *mLockProfile) captureStack() {
   707  	if debug.profstackdepth == 0 {
   708  		// profstackdepth is set to 0 by the user, so mp.profStack is nil and we
   709  		// can't record a stack trace.
   710  		return
   711  	}
   712  
   713  	skip := 4 // runtime.(*mLockProfile).recordUnlock runtime.unlock2Wake runtime.unlock2 runtime.unlockWithRank
   714  	if staticLockRanking {
   715  		// When static lock ranking is enabled, we'll always be on the system
   716  		// stack at this point. There will be a runtime.unlockWithRank.func1
   717  		// frame, and if the call to runtime.unlock took place on a user stack
   718  		// then there'll also be a runtime.systemstack frame. To keep stack
   719  		// traces somewhat consistent whether or not static lock ranking is
   720  		// enabled, we'd like to skip those. But it's hard to tell how long
   721  		// we've been on the system stack so accept an extra frame in that case,
   722  		// with a leaf of "runtime.unlockWithRank runtime.unlock" instead of
   723  		// "runtime.unlock".
   724  		skip += 1 // runtime.unlockWithRank.func1
   725  	}
   726  	prof.haveStack = true
   727  
   728  	var nstk int
   729  	gp := getg()
   730  	sp := sys.GetCallerSP()
   731  	pc := sys.GetCallerPC()
   732  	systemstack(func() {
   733  		var u unwinder
   734  		u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
   735  		nstk = tracebackPCs(&u, skip, prof.stack)
   736  	})
   737  	if nstk < len(prof.stack) {
   738  		prof.stack[nstk] = 0
   739  	}
   740  }
   741  
   742  // store adds the M's local record to the mutex contention profile.
   743  //
   744  // From unlock2, we might not be holding a p in this code.
   745  //
   746  //go:nowritebarrierrec
   747  func (prof *mLockProfile) store() {
   748  	if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.haveStack {
   749  		prof.storeSlow()
   750  	}
   751  }
   752  
   753  func (prof *mLockProfile) storeSlow() {
   754  	// Report any contention we experience within this function as "lost"; it's
   755  	// important that the act of reporting a contention event not lead to a
   756  	// reportable contention event. This also means we can use prof.stack
   757  	// without copying, since it won't change during this function.
   758  	mp := acquirem()
   759  	prof.disabled = true
   760  
   761  	nstk := int(debug.profstackdepth)
   762  	for i := 0; i < nstk; i++ {
   763  		if pc := prof.stack[i]; pc == 0 {
   764  			nstk = i
   765  			break
   766  		}
   767  	}
   768  
   769  	cycles, lost := prof.cycles, prof.cyclesLost
   770  	prof.cycles, prof.cyclesLost = 0, 0
   771  	prof.haveStack = false
   772  
   773  	rate := int64(atomic.Load64(&mutexprofilerate))
   774  	saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
   775  	if lost > 0 {
   776  		lostStk := [...]uintptr{
   777  			abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
   778  		}
   779  		saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
   780  	}
   781  
   782  	prof.disabled = false
   783  	releasem(mp)
   784  }
   785  
   786  func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
   787  	b := stkbucket(which, 0, stk, true)
   788  	bp := b.bp()
   789  
   790  	lock(&profBlockLock)
   791  	// We want to up-scale the count and cycles according to the
   792  	// probability that the event was sampled. For block profile events,
   793  	// the sample probability is 1 if cycles >= rate, and cycles / rate
   794  	// otherwise. For mutex profile events, the sample probability is 1 / rate.
   795  	// We scale the events by 1 / (probability the event was sampled).
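         	// For illustration: a blockProfile event with cycles=30 and rate=100 adds
         	// 100/30 ≈ 3.3 to count and 100 to cycles; a mutexProfile event with
         	// rate=5 adds 5 to count and 5*cycles to cycles.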
   796  	if which == blockProfile && cycles < rate {
   797  		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
   798  		bp.count += float64(rate) / float64(cycles)
   799  		bp.cycles += rate
   800  	} else if which == mutexProfile {
   801  		bp.count += float64(rate)
   802  		bp.cycles += rate * cycles
   803  	} else {
   804  		bp.count++
   805  		bp.cycles += cycles
   806  	}
   807  	unlock(&profBlockLock)
   808  }
   809  
   810  var mutexprofilerate uint64 // fraction sampled
   811  
   812  // SetMutexProfileFraction controls the fraction of mutex contention events
   813  // that are reported in the mutex profile. On average 1/rate events are
   814  // reported. The previous rate is returned.
   815  //
   816  // To turn off profiling entirely, pass rate 0.
   817  // To just read the current rate, pass rate < 0.
    818  // (For rate > 1, the details of sampling may change.)
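         //
         // For example, SetMutexProfileFraction(100) reports, on average, one out of
         // every 100 mutex contention events.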
   819  func SetMutexProfileFraction(rate int) int {
   820  	if rate < 0 {
   821  		return int(mutexprofilerate)
   822  	}
   823  	old := mutexprofilerate
   824  	atomic.Store64(&mutexprofilerate, uint64(rate))
   825  	return int(old)
   826  }
   827  
   828  func mutexevent(cycles int64, skip int) {
   829  	if cycles < 0 {
   830  		cycles = 0
   831  	}
   832  	rate := int64(atomic.Load64(&mutexprofilerate))
   833  	if rate > 0 && cheaprand64()%rate == 0 {
   834  		saveblockevent(cycles, rate, skip+1, mutexProfile)
   835  	}
   836  }
   837  
   838  // Go interface to profile data.
   839  
   840  // A StackRecord describes a single execution stack.
   841  type StackRecord struct {
   842  	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
   843  }
   844  
   845  // Stack returns the stack trace associated with the record,
   846  // a prefix of r.Stack0.
   847  func (r *StackRecord) Stack() []uintptr {
   848  	for i, v := range r.Stack0 {
   849  		if v == 0 {
   850  			return r.Stack0[0:i]
   851  		}
   852  	}
   853  	return r.Stack0[0:]
   854  }
   855  
   856  // MemProfileRate controls the fraction of memory allocations
   857  // that are recorded and reported in the memory profile.
   858  // The profiler aims to sample an average of
   859  // one allocation per MemProfileRate bytes allocated.
   860  //
   861  // To include every allocated block in the profile, set MemProfileRate to 1.
   862  // To turn off profiling entirely, set MemProfileRate to 0.
   863  //
   864  // The tools that process the memory profiles assume that the
   865  // profile rate is constant across the lifetime of the program
   866  // and equal to the current value. Programs that change the
   867  // memory profiling rate should do so just once, as early as
   868  // possible in the execution of the program (for example,
   869  // at the beginning of main).
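         //
         // For example (illustrative), a program that wants to sample every allocation
         // could set the rate at the top of main:
         //
         //	func main() {
         //		runtime.MemProfileRate = 1 // sample all allocations (expensive)
         //		// ...
         //	}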
   870  var MemProfileRate int = 512 * 1024
   871  
   872  // disableMemoryProfiling is set by the linker if memory profiling
   873  // is not used and the link type guarantees nobody else could use it
   874  // elsewhere.
   875  // We check if the runtime.memProfileInternal symbol is present.
   876  var disableMemoryProfiling bool
   877  
   878  // A MemProfileRecord describes the live objects allocated
   879  // by a particular call sequence (stack trace).
   880  type MemProfileRecord struct {
   881  	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
   882  	AllocObjects, FreeObjects int64       // number of objects allocated, freed
   883  	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
   884  }
   885  
   886  // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
   887  func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
   888  
   889  // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
   890  func (r *MemProfileRecord) InUseObjects() int64 {
   891  	return r.AllocObjects - r.FreeObjects
   892  }
   893  
   894  // Stack returns the stack trace associated with the record,
   895  // a prefix of r.Stack0.
   896  func (r *MemProfileRecord) Stack() []uintptr {
   897  	for i, v := range r.Stack0 {
   898  		if v == 0 {
   899  			return r.Stack0[0:i]
   900  		}
   901  	}
   902  	return r.Stack0[0:]
   903  }
   904  
   905  // MemProfile returns a profile of memory allocated and freed per allocation
   906  // site.
   907  //
   908  // MemProfile returns n, the number of records in the current memory profile.
   909  // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
   910  // If len(p) < n, MemProfile does not change p and returns n, false.
   911  //
   912  // If inuseZero is true, the profile includes allocation records
   913  // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
   914  // These are sites where memory was allocated, but it has all
   915  // been released back to the runtime.
   916  //
   917  // The returned profile may be up to two garbage collection cycles old.
   918  // This is to avoid skewing the profile toward allocations; because
   919  // allocations happen in real time but frees are delayed until the garbage
   920  // collector performs sweeping, the profile only accounts for allocations
   921  // that have had a chance to be freed by the garbage collector.
   922  //
   923  // Most clients should use the runtime/pprof package or
   924  // the testing package's -test.memprofile flag instead
   925  // of calling MemProfile directly.
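         //
         // An illustrative call pattern (grow p until the profile fits):
         //
         //	var p []runtime.MemProfileRecord
         //	n, ok := runtime.MemProfile(nil, true)
         //	for !ok {
         //		p = make([]runtime.MemProfileRecord, n+50)
         //		n, ok = runtime.MemProfile(p, true)
         //	}
         //	p = p[:n]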
   926  func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
   927  	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
   928  		copyMemProfileRecord(&p[0], r)
   929  		p = p[1:]
   930  	})
   931  }
   932  
   933  // memProfileInternal returns the number of records n in the profile. If there
    934  // are at most size records, copyFn is invoked for each record, and ok returns
   935  // true.
   936  //
    937  // The linker sets disableMemoryProfiling to true to disable memory profiling
   938  // if this function is not reachable. Mark it noinline to ensure the symbol exists.
   939  // (This function is big and normally not inlined anyway.)
   940  // See also disableMemoryProfiling above and cmd/link/internal/ld/lib.go:linksetup.
   941  //
   942  //go:noinline
   943  func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool) {
   944  	cycle := mProfCycle.read()
   945  	// If we're between mProf_NextCycle and mProf_Flush, take care
   946  	// of flushing to the active profile so we only have to look
   947  	// at the active profile below.
   948  	index := cycle % uint32(len(memRecord{}.future))
   949  	lock(&profMemActiveLock)
   950  	lock(&profMemFutureLock[index])
   951  	mProf_FlushLocked(index)
   952  	unlock(&profMemFutureLock[index])
   953  	clear := true
   954  	head := (*bucket)(mbuckets.Load())
   955  	for b := head; b != nil; b = b.allnext {
   956  		mp := b.mp()
   957  		if inuseZero || mp.active.allocs != mp.active.frees {
   958  			n++
   959  		}
   960  		if mp.active.allocs != 0 || mp.active.frees != 0 {
   961  			clear = false
   962  		}
   963  	}
   964  	if clear {
   965  		// Absolutely no data, suggesting that a garbage collection
   966  		// has not yet happened. In order to allow profiling when
   967  		// garbage collection is disabled from the beginning of execution,
   968  		// accumulate all of the cycles, and recount buckets.
   969  		n = 0
   970  		for b := head; b != nil; b = b.allnext {
   971  			mp := b.mp()
   972  			for c := range mp.future {
   973  				lock(&profMemFutureLock[c])
   974  				mp.active.add(&mp.future[c])
   975  				mp.future[c] = memRecordCycle{}
   976  				unlock(&profMemFutureLock[c])
   977  			}
   978  			if inuseZero || mp.active.allocs != mp.active.frees {
   979  				n++
   980  			}
   981  		}
   982  	}
   983  	if n <= size {
   984  		ok = true
   985  		for b := head; b != nil; b = b.allnext {
   986  			mp := b.mp()
   987  			if inuseZero || mp.active.allocs != mp.active.frees {
   988  				r := profilerecord.MemProfileRecord{
   989  					ObjectSize:   int64(b.size),
   990  					AllocObjects: int64(mp.active.allocs),
   991  					FreeObjects:  int64(mp.active.frees),
   992  					Stack:        b.stk(),
   993  				}
   994  				copyFn(r)
   995  			}
   996  		}
   997  	}
   998  	unlock(&profMemActiveLock)
   999  	return
  1000  }
  1001  
  1002  func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord) {
  1003  	dst.AllocBytes = src.AllocObjects * src.ObjectSize
  1004  	dst.FreeBytes = src.FreeObjects * src.ObjectSize
  1005  	dst.AllocObjects = src.AllocObjects
  1006  	dst.FreeObjects = src.FreeObjects
  1007  	if raceenabled {
  1008  		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(MemProfile))
  1009  	}
  1010  	if msanenabled {
  1011  		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1012  	}
  1013  	if asanenabled {
  1014  		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1015  	}
  1016  	i := copy(dst.Stack0[:], src.Stack)
  1017  	clear(dst.Stack0[i:])
  1018  }
  1019  
  1020  //go:linkname pprof_memProfileInternal
  1021  func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) {
  1022  	return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
  1023  		p[0] = r
  1024  		p = p[1:]
  1025  	})
  1026  }
  1027  
  1028  func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
  1029  	lock(&profMemActiveLock)
  1030  	head := (*bucket)(mbuckets.Load())
  1031  	for b := head; b != nil; b = b.allnext {
  1032  		mp := b.mp()
  1033  		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
  1034  	}
  1035  	unlock(&profMemActiveLock)
  1036  }
  1037  
   1038  // BlockProfileRecord describes blocking events originating
  1039  // at a particular call sequence (stack trace).
  1040  type BlockProfileRecord struct {
  1041  	Count  int64
  1042  	Cycles int64
  1043  	StackRecord
  1044  }
  1045  
  1046  // BlockProfile returns n, the number of records in the current blocking profile.
  1047  // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
  1048  // If len(p) < n, BlockProfile does not change p and returns n, false.
  1049  //
  1050  // Most clients should use the [runtime/pprof] package or
  1051  // the [testing] package's -test.blockprofile flag instead
  1052  // of calling BlockProfile directly.
  1053  func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
  1054  	var m int
  1055  	n, ok = blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1056  		copyBlockProfileRecord(&p[m], r)
  1057  		m++
  1058  	})
  1059  	if ok {
  1060  		expandFrames(p[:n])
  1061  	}
  1062  	return
  1063  }
  1064  
  1065  func expandFrames(p []BlockProfileRecord) {
  1066  	expandedStack := makeProfStack()
  1067  	for i := range p {
  1068  		cf := CallersFrames(p[i].Stack())
  1069  		j := 0
  1070  		for j < len(expandedStack) {
  1071  			f, more := cf.Next()
  1072  			// f.PC is a "call PC", but later consumers will expect
  1073  			// "return PCs"
  1074  			expandedStack[j] = f.PC + 1
  1075  			j++
  1076  			if !more {
  1077  				break
  1078  			}
  1079  		}
  1080  		k := copy(p[i].Stack0[:], expandedStack[:j])
  1081  		clear(p[i].Stack0[k:])
  1082  	}
  1083  }
  1084  
  1085  // blockProfileInternal returns the number of records n in the profile. If there
   1086  // are at most size records, copyFn is invoked for each record, and ok returns
  1087  // true.
  1088  func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
  1089  	lock(&profBlockLock)
  1090  	head := (*bucket)(bbuckets.Load())
  1091  	for b := head; b != nil; b = b.allnext {
  1092  		n++
  1093  	}
  1094  	if n <= size {
  1095  		ok = true
  1096  		for b := head; b != nil; b = b.allnext {
  1097  			bp := b.bp()
  1098  			r := profilerecord.BlockProfileRecord{
  1099  				Count:  int64(bp.count),
  1100  				Cycles: bp.cycles,
  1101  				Stack:  b.stk(),
  1102  			}
  1103  			// Prevent callers from having to worry about division by zero errors.
  1104  			// See discussion on http://golang.org/cl/299991.
  1105  			if r.Count == 0 {
  1106  				r.Count = 1
  1107  			}
  1108  			copyFn(r)
  1109  		}
  1110  	}
  1111  	unlock(&profBlockLock)
  1112  	return
  1113  }
  1114  
  1115  // copyBlockProfileRecord copies the sample values and call stack from src to dst.
  1116  // The call stack is copied as-is. The caller is responsible for handling inline
  1117  // expansion, needed when the call stack was collected with frame pointer unwinding.
  1118  func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
  1119  	dst.Count = src.Count
  1120  	dst.Cycles = src.Cycles
  1121  	if raceenabled {
  1122  		racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(BlockProfile))
  1123  	}
  1124  	if msanenabled {
  1125  		msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1126  	}
  1127  	if asanenabled {
  1128  		asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
  1129  	}
  1130  	// We just copy the stack here without inline expansion
  1131  	// (needed if frame pointer unwinding is used)
  1132  	// since this function is called under the profile lock,
  1133  	// and doing something that might allocate can violate lock ordering.
  1134  	i := copy(dst.Stack0[:], src.Stack)
  1135  	clear(dst.Stack0[i:])
  1136  }
  1137  
  1138  //go:linkname pprof_blockProfileInternal
  1139  func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
  1140  	return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1141  		p[0] = r
  1142  		p = p[1:]
  1143  	})
  1144  }
  1145  
  1146  // MutexProfile returns n, the number of records in the current mutex profile.
  1147  // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
  1148  // Otherwise, MutexProfile does not change p, and returns n, false.
  1149  //
  1150  // Most clients should use the [runtime/pprof] package
  1151  // instead of calling MutexProfile directly.
  1152  func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
  1153  	var m int
  1154  	n, ok = mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1155  		copyBlockProfileRecord(&p[m], r)
  1156  		m++
  1157  	})
  1158  	if ok {
  1159  		expandFrames(p[:n])
  1160  	}
  1161  	return
  1162  }
  1163  
  1164  // mutexProfileInternal returns the number of records n in the profile. If there
   1165  // are at most size records, copyFn is invoked for each record, and ok returns
  1166  // true.
  1167  func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
  1168  	lock(&profBlockLock)
  1169  	head := (*bucket)(xbuckets.Load())
  1170  	for b := head; b != nil; b = b.allnext {
  1171  		n++
  1172  	}
  1173  	if n <= size {
  1174  		ok = true
  1175  		for b := head; b != nil; b = b.allnext {
  1176  			bp := b.bp()
  1177  			r := profilerecord.BlockProfileRecord{
  1178  				Count:  int64(bp.count),
  1179  				Cycles: bp.cycles,
  1180  				Stack:  b.stk(),
  1181  			}
  1182  			copyFn(r)
  1183  		}
  1184  	}
  1185  	unlock(&profBlockLock)
  1186  	return
  1187  }
  1188  
  1189  //go:linkname pprof_mutexProfileInternal
  1190  func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
  1191  	return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
  1192  		p[0] = r
  1193  		p = p[1:]
  1194  	})
  1195  }
  1196  
  1197  // ThreadCreateProfile returns n, the number of records in the thread creation profile.
  1198  // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
  1199  // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
  1200  //
  1201  // Most clients should use the runtime/pprof package instead
  1202  // of calling ThreadCreateProfile directly.
  1203  func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
  1204  	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
  1205  		i := copy(p[0].Stack0[:], r.Stack)
  1206  		clear(p[0].Stack0[i:])
  1207  		p = p[1:]
  1208  	})
  1209  }
  1210  
  1211  // threadCreateProfileInternal returns the number of records n in the profile.
   1212  // If there are at most size records, copyFn is invoked for each record, and
  1213  // ok returns true.
  1214  func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
  1215  	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
  1216  	for mp := first; mp != nil; mp = mp.alllink {
  1217  		n++
  1218  	}
  1219  	if n <= size {
  1220  		ok = true
  1221  		for mp := first; mp != nil; mp = mp.alllink {
  1222  			r := profilerecord.StackRecord{Stack: mp.createstack[:]}
  1223  			copyFn(r)
  1224  		}
  1225  	}
  1226  	return
  1227  }
  1228  
  1229  //go:linkname pprof_threadCreateInternal
  1230  func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) {
  1231  	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
  1232  		p[0] = r
  1233  		p = p[1:]
  1234  	})
  1235  }
  1236  
  1237  //go:linkname pprof_goroutineProfileWithLabels
  1238  func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1239  	return goroutineProfileWithLabels(p, labels)
  1240  }
  1241  
  1242  // labels may be nil. If labels is non-nil, it must have the same length as p.
  1243  func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1244  	if labels != nil && len(labels) != len(p) {
  1245  		labels = nil
  1246  	}
  1247  
  1248  	return goroutineProfileWithLabelsConcurrent(p, labels)
  1249  }
  1250  
  1251  //go:linkname pprof_goroutineLeakProfileWithLabels
  1252  func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1253  	return goroutineLeakProfileWithLabels(p, labels)
  1254  }
  1255  
  1256  // labels may be nil. If labels is non-nil, it must have the same length as p.
  1257  func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1258  	if labels != nil && len(labels) != len(p) {
  1259  		labels = nil
  1260  	}
  1261  
  1262  	return goroutineLeakProfileWithLabelsConcurrent(p, labels)
  1263  }
  1264  
  1265  var goroutineProfile = struct {
  1266  	sema    uint32
  1267  	active  bool
  1268  	offset  atomic.Int64
  1269  	records []profilerecord.StackRecord
  1270  	labels  []unsafe.Pointer
  1271  }{
  1272  	sema: 1,
  1273  }
  1274  
  1275  // goroutineProfileState indicates the status of a goroutine's stack for the
  1276  // current in-progress goroutine profile. Goroutines' stacks are initially
  1277  // "Absent" from the profile, and end up "Satisfied" by the time the profile is
  1278  // complete. While a goroutine's stack is being captured, its
  1279  // goroutineProfileState will be "InProgress" and it will not be able to run
  1280  // until the capture completes and the state moves to "Satisfied".
  1281  //
  1282  // Some goroutines (the finalizer goroutine, which at various times can be
   1283  // either a "system" or a "user" goroutine; the goroutine that is
   1284  // coordinating the profile; and any goroutines created during the profile) move
  1285  // directly to the "Satisfied" state.
  1286  type goroutineProfileState uint32
  1287  
  1288  const (
  1289  	goroutineProfileAbsent goroutineProfileState = iota
  1290  	goroutineProfileInProgress
  1291  	goroutineProfileSatisfied
  1292  )
  1293  
  1294  type goroutineProfileStateHolder atomic.Uint32
  1295  
  1296  func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
  1297  	return goroutineProfileState((*atomic.Uint32)(p).Load())
  1298  }
  1299  
  1300  func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
  1301  	(*atomic.Uint32)(p).Store(uint32(value))
  1302  }
  1303  
  1304  func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
  1305  	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
  1306  }
  1307  
  1308  func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1309  	if len(p) == 0 {
  1310  		// An empty slice is obviously too small. Return a rough
  1311  		// allocation estimate.
  1312  		return work.goroutineLeak.count, false
  1313  	}
  1314  
  1315  	pcbuf := makeProfStack() // see saveg() for explanation
  1316  
  1317  	// Prepare a profile large enough to store all leaked goroutines.
  1318  	n = work.goroutineLeak.count
  1319  
  1320  	if n > len(p) {
  1321  		// There's not enough space in p to store the whole profile, so
  1322  		// we're not allowed to write to p at all and must return n, false.
  1323  		return n, false
  1324  	}
  1325  
  1326  	// Visit each leaked goroutine and try to record its stack.
  1327  	var offset int
  1328  	forEachGRace(func(gp1 *g) {
  1329  		if readgstatus(gp1)&^_Gscan == _Gleaked {
  1330  			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) })
  1331  			if labels != nil {
  1332  				labels[offset] = gp1.labels
  1333  			}
  1334  			offset++
  1335  		}
  1336  	})
  1337  
  1338  	if raceenabled {
  1339  		raceacquire(unsafe.Pointer(&labelSync))
  1340  	}
  1341  
  1342  	return n, true
  1343  }
  1344  
  1345  func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1346  	if len(p) == 0 {
  1347  		// An empty slice is obviously too small. Return a rough
  1348  		// allocation estimate without bothering to STW. As long as
  1349  		// this is close, then we'll only need to STW once (on the next
  1350  		// call).
  1351  		return int(gcount(false)), false
  1352  	}
  1353  
  1354  	semacquire(&goroutineProfile.sema)
  1355  
  1356  	ourg := getg()
  1357  
  1358  	pcbuf := makeProfStack() // see saveg() for explanation
  1359  	stw := stopTheWorld(stwGoroutineProfile)
  1360  	// Using gcount while the world is stopped should give us a consistent view
  1361  	// of the number of live goroutines, minus the number of goroutines that are
  1362  	// alive and permanently marked as "system". But to make this count agree
  1363  	// with what we'd get from isSystemGoroutine, we need special handling for
  1364  	// goroutines that can vary between user and system to ensure that the count
  1365  	// doesn't change during the collection. So, check the finalizer goroutine
  1366  	// and cleanup goroutines in particular.
  1367  	n = int(gcount(false))
  1368  	if fingStatus.Load()&fingRunningFinalizer != 0 {
  1369  		n++
  1370  	}
  1371  	n += int(gcCleanups.running.Load())
  1372  
  1373  	if n > len(p) {
  1374  		// There's not enough space in p to store the whole profile, so (per the
  1375  		// contract of runtime.GoroutineProfile) we're not allowed to write to p
  1376  		// at all and must return n, false.
  1377  		startTheWorld(stw)
  1378  		semrelease(&goroutineProfile.sema)
  1379  		return n, false
  1380  	}
  1381  
  1382  	// Save current goroutine.
  1383  	sp := sys.GetCallerSP()
  1384  	pc := sys.GetCallerPC()
  1385  	systemstack(func() {
  1386  		saveg(pc, sp, ourg, &p[0], pcbuf)
  1387  	})
  1388  	if labels != nil {
  1389  		labels[0] = ourg.labels
  1390  	}
  1391  	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
  1392  	goroutineProfile.offset.Store(1)
  1393  
  1394  	// Prepare for all other goroutines to enter the profile. Aside from ourg,
  1395  	// every goroutine struct in the allgs list has its goroutineProfiled field
  1396  	// cleared. Any goroutine created from this point on (while
  1397  	// goroutineProfile.active is set) will start with its goroutineProfiled
  1398  	// field set to goroutineProfileSatisfied.
  1399  	goroutineProfile.active = true
  1400  	goroutineProfile.records = p
  1401  	goroutineProfile.labels = labels
  1402  	startTheWorld(stw)
  1403  
  1404  	// Visit each goroutine that existed as of the startTheWorld call above.
  1405  	//
  1406  	// New goroutines may not be in this list, but we didn't want to know about
  1407  	// them anyway. If they do appear in this list (via reusing a dead goroutine
  1408  	// struct, or racing to launch between the world restarting and us getting
  1409  	// the list), they will already have their goroutineProfiled field set to
  1410  	// goroutineProfileSatisfied before their state transitions out of _Gdead.
  1411  	//
  1412  	// Any goroutine that the scheduler tries to execute concurrently with this
  1413  	// call will start by adding itself to the profile (before the act of
  1414  	// executing can cause any changes in its stack).
  1415  	forEachGRace(func(gp1 *g) {
  1416  		tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
  1417  	})
  1418  
  1419  	stw = stopTheWorld(stwGoroutineProfileCleanup)
  1420  	endOffset := goroutineProfile.offset.Swap(0)
  1421  	goroutineProfile.active = false
  1422  	goroutineProfile.records = nil
  1423  	goroutineProfile.labels = nil
  1424  	startTheWorld(stw)
  1425  
  1426  	// Restore the invariant that every goroutine struct in allgs has its
  1427  	// goroutineProfiled field cleared.
  1428  	forEachGRace(func(gp1 *g) {
  1429  		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
  1430  	})
  1431  
  1432  	if raceenabled {
  1433  		raceacquire(unsafe.Pointer(&labelSync))
  1434  	}
  1435  
  1436  	if n != int(endOffset) {
  1437  		// It's a big surprise that the number of goroutines changed while we
  1438  		// were collecting the profile. But probably better to return a
  1439  		// truncated profile than to crash the whole process.
  1440  		//
  1441  		// For instance, needm moves a goroutine out of the _Gdeadextra state and so
  1442  		// might be able to change the goroutine count without interacting with
  1443  		// the scheduler. For code like that, the race windows are small and the
  1444  		// combination of features is uncommon, so it's hard to be (and remain)
  1445  		// sure we've caught them all.
  1446  	}
  1447  
  1448  	semrelease(&goroutineProfile.sema)
  1449  	return n, true
  1450  }
  1451  
  1452  // tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
  1453  // tryRecordGoroutineProfile.
  1454  //
  1455  //go:yeswritebarrierrec
  1456  func tryRecordGoroutineProfileWB(gp1 *g) {
  1457  	if getg().m.p.ptr() == nil {
  1458  		throw("no P available, write barriers are forbidden")
  1459  	}
  1460  	tryRecordGoroutineProfile(gp1, nil, osyield)
  1461  }
  1462  
  1463  // tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
  1464  // in the current goroutine profile: either that it should not be profiled, or
  1465  // that a snapshot of its call stack and labels is now in the profile.
  1466  func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
  1467  	if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra {
  1468  		// Dead goroutines should not appear in the profile. Goroutines that
  1469  		// start while profile collection is active will get goroutineProfiled
  1470  		// set to goroutineProfileSatisfied before transitioning out of _Gdead,
  1471  		// so here we check _Gdead first.
  1472  		return
  1473  	}
  1474  
  1475  	for {
  1476  		prev := gp1.goroutineProfiled.Load()
  1477  		if prev == goroutineProfileSatisfied {
  1478  			// This goroutine is already in the profile (or is new since the
  1479  			// start of collection, so shouldn't appear in the profile).
  1480  			break
  1481  		}
  1482  		if prev == goroutineProfileInProgress {
  1483  			// Something else is adding gp1 to the goroutine profile right now.
  1484  			// Give that a moment to finish.
  1485  			yield()
  1486  			continue
  1487  		}
  1488  
  1489  		// While we have gp1.goroutineProfiled set to
  1490  		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
  1491  		// actually be able to run. Disable preemption for ourselves, to make
  1492  		// sure we finish profiling gp1 right away instead of leaving it stuck
  1493  		// in this limbo.
  1494  		mp := acquirem()
  1495  		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
  1496  			doRecordGoroutineProfile(gp1, pcbuf)
  1497  			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
  1498  		}
  1499  		releasem(mp)
  1500  	}
  1501  }
  1502  
  1503  // doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
  1504  // goroutine profile. Preemption is disabled.
  1505  //
  1506  // This may be called via tryRecordGoroutineProfile in two ways: by the
  1507  // goroutine that is coordinating the goroutine profile (running on its own
  1508  // stack), or from the scheduler in preparation to execute gp1 (running on the
  1509  // system stack).
  1510  func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
  1511  	if isSystemGoroutine(gp1, false) {
  1512  		// System goroutines should not appear in the profile.
  1513  		// Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine
  1514  		// may change on a goroutine while it is executing, so while the scheduler might
  1515  		// see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and
  1516  		// this inconsistency could cause invariants to be violated, such as trying to
  1517  		// record the stack of a running goroutine below. In short, we still want system
  1518  		// goroutines to participate in the same state machine on gp1.goroutineProfiled as
  1519  	// everything else; we just don't record the stack in the profile.
  1520  		return
  1521  	}
  1522  	// Double-check that we didn't make a grave mistake. If the G is running then in
  1523  	// general, we cannot safely read its stack.
  1524  	//
  1525  	// However, there is one case where it's OK. There's a small window of time in
  1526  	// exitsyscall where a goroutine could be in _Grunning as it's exiting a syscall.
  1527  	// This is OK because the goroutine will not exit the syscall until it passes through
  1528  	// a call to tryRecordGoroutineProfile. (An explicit one on the fast path, an
  1529  	// implicit one via the scheduler on the slow path.)
  1530  	//
  1531  	// This is also why it's safe to check syscallsp here. The syscall path mutates
  1532  	// syscallsp only after passing through tryRecordGoroutineProfile.
  1533  	if readgstatus(gp1) == _Grunning && gp1.syscallsp == 0 {
  1534  		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
  1535  		throw("cannot read stack of running goroutine")
  1536  	}
  1537  
  1538  	offset := int(goroutineProfile.offset.Add(1)) - 1
  1539  
  1540  	if offset >= len(goroutineProfile.records) {
  1541  		// Should be impossible, but better to return a truncated profile than
  1542  		// to crash the entire process at this point. Instead, deal with it in
  1543  		// goroutineProfileWithLabelsConcurrent where we have more context.
  1544  		return
  1545  	}
  1546  
  1547  	// saveg calls gentraceback, which may call cgo traceback functions. When
  1548  	// called from the scheduler, this is on the system stack already so
  1549  	// traceback.go:cgoContextPCs will avoid calling back into the scheduler.
  1550  	//
  1551  	// When called from the goroutine coordinating the profile, we still have
  1552  	// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
  1553  	// preventing it from being truly _Grunnable. So we'll use the system stack
  1554  	// to avoid schedule delays.
  1555  	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })
  1556  
  1557  	if goroutineProfile.labels != nil {
  1558  		goroutineProfile.labels[offset] = gp1.labels
  1559  	}
  1560  }
  1561  
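        // goroutineProfileWithLabelsSync collects a goroutine profile the simple way
        // (doc comment added here for clarity): it stops the world for the whole
        // collection, counts the goroutines that should appear (skipping the caller's
        // own entry, dead goroutines, and system goroutines), and records each one's
        // stack and label while nothing else can run.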
  1562  func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
  1563  	gp := getg()
  1564  
  1565  	isOK := func(gp1 *g) bool {
  1566  		// Checking isSystemGoroutine here makes GoroutineProfile
  1567  		// consistent with both NumGoroutine and Stack.
  1568  		if gp1 == gp {
  1569  			return false
  1570  		}
  1571  		if status := readgstatus(gp1); status == _Gdead || status == _Gdeadextra {
  1572  			return false
  1573  		}
  1574  		if isSystemGoroutine(gp1, false) {
  1575  			return false
  1576  		}
  1577  		return true
  1578  	}
  1579  
  1580  	pcbuf := makeProfStack() // see saveg() for explanation
  1581  	stw := stopTheWorld(stwGoroutineProfile)
  1582  
  1583  	// World is stopped, no locking required.
  1584  	n = 1
  1585  	forEachGRace(func(gp1 *g) {
  1586  		if isOK(gp1) {
  1587  			n++
  1588  		}
  1589  	})
  1590  
  1591  	if n <= len(p) {
  1592  		ok = true
  1593  		r, lbl := p, labels
  1594  
  1595  		// Save current goroutine.
  1596  		sp := sys.GetCallerSP()
  1597  		pc := sys.GetCallerPC()
  1598  		systemstack(func() {
  1599  			saveg(pc, sp, gp, &r[0], pcbuf)
  1600  		})
  1601  		r = r[1:]
  1602  
  1603  		// If we have a place to put our goroutine labelmap, insert it there.
  1604  		if labels != nil {
  1605  			lbl[0] = gp.labels
  1606  			lbl = lbl[1:]
  1607  		}
  1608  
  1609  		// Save other goroutines.
  1610  		forEachGRace(func(gp1 *g) {
  1611  			if !isOK(gp1) {
  1612  				return
  1613  			}
  1614  
  1615  			if len(r) == 0 {
  1616  				// Should be impossible, but better to return a
  1617  				// truncated profile than to crash the entire process.
  1618  				return
  1619  			}
  1620  			// saveg calls gentraceback, which may call cgo traceback functions.
  1621  			// The world is stopped, so it cannot use cgocall (which will be
  1622  			// blocked at exitsyscall). Do it on the system stack so it won't
  1623  			// call into the scheduler (see traceback.go:cgoContextPCs).
  1624  			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0], pcbuf) })
  1625  			if labels != nil {
  1626  				lbl[0] = gp1.labels
  1627  				lbl = lbl[1:]
  1628  			}
  1629  			r = r[1:]
  1630  		})
  1631  	}
  1632  
  1633  	if raceenabled {
  1634  		raceacquire(unsafe.Pointer(&labelSync))
  1635  	}
  1636  
  1637  	startTheWorld(stw)
  1638  	return n, ok
  1639  }
  1640  
  1641  // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
  1642  // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
  1643  // If len(p) < n, GoroutineProfile does not change p and returns n, false.
  1644  //
  1645  // Most clients should use the [runtime/pprof] package instead
  1646  // of calling GoroutineProfile directly.
  1647  func GoroutineProfile(p []StackRecord) (n int, ok bool) {
  1648  	records := make([]profilerecord.StackRecord, len(p))
  1649  	n, ok = goroutineProfileInternal(records)
  1650  	if !ok {
  1651  		return
  1652  	}
  1653  	for i, mr := range records[0:n] {
  1654  		l := copy(p[i].Stack0[:], mr.Stack)
  1655  		clear(p[i].Stack0[l:])
  1656  	}
  1657  	return
  1658  }
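
        // Illustrative usage sketch (not part of the original source): because the
        // goroutine count can change between calls, callers of GoroutineProfile
        // typically retry with a larger slice until the profile fits, e.g.:
        //
        //	p := make([]StackRecord, NumGoroutine()+10)
        //	for {
        //		n, ok := GoroutineProfile(p)
        //		if ok {
        //			p = p[:n]
        //			break
        //		}
        //		p = make([]StackRecord, n+10)
        //	}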
  1659  
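        // goroutineProfileInternal collects a goroutine profile into
        // profilerecord.StackRecords without collecting labels
        // (doc comment added here for clarity).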
  1660  func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
  1661  	return goroutineProfileWithLabels(p, nil)
  1662  }
  1663  
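        // saveg records gp's stack into r.Stack, unwinding from pc and sp (or from
        // gp's saved scheduler state when both are ^uintptr(0)) and using pcbuf as
        // scratch space; it allocates a fresh buffer when pcbuf is nil
        // (doc comment added here for clarity).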
  1664  func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
  1665  	// To reduce memory usage, we want to allocate a r.Stack that is just big
  1666  	// enough to hold gp's stack trace. Naively we might achieve this by
  1667  	// recording our stack trace into mp.profStack, and then allocating a
  1668  	// r.Stack of the right size. However, mp.profStack is also used for
  1669  	// allocation profiling, so it could get overwritten if the slice allocation
  1670  	// gets profiled. So instead we record the stack trace into a temporary
  1671  	// pcbuf which is usually given to us by our caller. When it's not, we have
  1672  	// to allocate one here. This will only happen for goroutines that were in a
  1673  	// syscall when the goroutine profile started or for goroutines that manage
  1674  	// to execute before we finish iterating over all the goroutines.
  1675  	if pcbuf == nil {
  1676  		pcbuf = makeProfStack()
  1677  	}
  1678  
  1679  	var u unwinder
  1680  	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
  1681  	n := tracebackPCs(&u, 0, pcbuf)
  1682  	r.Stack = make([]uintptr, n)
  1683  	copy(r.Stack, pcbuf)
  1684  }
  1685  
  1686  // Stack formats a stack trace of the calling goroutine into buf
  1687  // and returns the number of bytes written to buf.
  1688  // If all is true, Stack formats stack traces of all other goroutines
  1689  // into buf after the trace for the current goroutine.
  1690  func Stack(buf []byte, all bool) int {
  1691  	var stw worldStop
  1692  	if all {
  1693  		stw = stopTheWorld(stwAllGoroutinesStack)
  1694  	}
  1695  
  1696  	n := 0
  1697  	if len(buf) > 0 {
  1698  		gp := getg()
  1699  		sp := sys.GetCallerSP()
  1700  		pc := sys.GetCallerPC()
  1701  		systemstack(func() {
  1702  			g0 := getg()
  1703  			// Force traceback=1 to override GOTRACEBACK setting,
  1704  			// so that Stack's results are consistent.
  1705  			// GOTRACEBACK is only about crash dumps.
  1706  			g0.m.traceback = 1
  1707  			g0.writebuf = buf[0:0:len(buf)]
  1708  			goroutineheader(gp)
  1709  			traceback(pc, sp, 0, gp)
  1710  			if all {
  1711  				tracebackothers(gp)
  1712  			}
  1713  			g0.m.traceback = 0
  1714  			n = len(g0.writebuf)
  1715  			g0.writebuf = nil
  1716  		})
  1717  	}
  1718  
  1719  	if all {
  1720  		startTheWorld(stw)
  1721  	}
  1722  	return n
  1723  }
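
        // Illustrative usage sketch (not part of the original source): Stack simply
        // truncates its output when buf is too small, so callers that want a complete
        // dump grow the buffer until the result fits, e.g.:
        //
        //	buf := make([]byte, 1<<16)
        //	for {
        //		n := Stack(buf, true)
        //		if n < len(buf) {
        //			buf = buf[:n]
        //			break
        //		}
        //		buf = make([]byte, 2*len(buf))
        //	}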
  1724  
