Source file src/runtime/stack.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/cpu"
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"internal/runtime/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  /*
    18  Stack layout parameters.
    19  Included both by runtime (compiled via 6c) and linkers (compiled via gcc).
    20  
    21  The per-goroutine g->stackguard is set to point StackGuard bytes
    22  above the bottom of the stack.  Each function compares its stack
    23  pointer against g->stackguard to check for overflow.  To cut one
    24  instruction from the check sequence for functions with tiny frames,
    25  the stack is allowed to protrude StackSmall bytes below the stack
    26  guard.  Functions with large frames don't bother with the check and
    27  always call morestack.  The sequences are (for amd64, others are
    28  similar):
    29  
    30  	guard = g->stackguard
    31  	frame = function's stack frame size
    32  	argsize = size of function arguments (call + return)
    33  
    34  	stack frame size <= StackSmall:
    35  		CMPQ guard, SP
    36  		JHI 3(PC)
    37  		MOVQ m->morearg, $(argsize << 32)
    38  		CALL morestack(SB)
    39  
	stack frame size > StackSmall but < StackBig:
    41  		LEAQ (frame-StackSmall)(SP), R0
    42  		CMPQ guard, R0
    43  		JHI 3(PC)
    44  		MOVQ m->morearg, $(argsize << 32)
    45  		CALL morestack(SB)
    46  
    47  	stack frame size >= StackBig:
    48  		MOVQ m->morearg, $((argsize << 32) | frame)
    49  		CALL morestack(SB)
    50  
    51  The bottom StackGuard - StackSmall bytes are important: there has
    52  to be enough room to execute functions that refuse to check for
    53  stack overflow, either because they need to be adjacent to the
    54  actual caller's frame (deferproc) or because they handle the imminent
    55  stack overflow (morestack).
    56  
    57  For example, deferproc might call malloc, which does one of the
    58  above checks (without allocating a full frame), which might trigger
    59  a call to morestack.  This sequence needs to fit in the bottom
    60  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    61  deferproc's frame is 56 bytes.  That fits well within the
    62  StackGuard - StackSmall bytes at the bottom.
    63  The linkers explore all possible call traces involving non-splitting
    64  functions to make sure that this limit cannot be violated.
    65  */
    66  
const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds fixedStack0 up to a power of 2.
	// Each OR below propagates the highest set bit of fixedStack0-1
	// into all lower bit positions, producing an all-ones value;
	// adding 1 then yields the next power of 2 >= fixedStack0.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
	// plus one stackSmall frame plus stackSystem bytes for the OS.
	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
   101  
const (
	// Compile-time debug knobs. All are zero/false in normal builds,
	// so the guarded code below is removed as dead code.
	//
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)
   116  
var (
	// NOTE(review): declared as a var rather than a const — presumably so it
	// can be toggled from tests or a debugger; confirm before changing.
	stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
)
   120  
const (
	// uintptrMask has every bit of a uintptr set. ANDing with it
	// truncates the negative constants below to pointer width.
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)
   143  
// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/FixedStack)
//
// There is a free list for each order.
// The anonymous padding field rounds each entry up to a cache-line
// multiple so different orders do not share a cache line.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}
   154  
// stackpoolItem is one order's free-span list together with the
// mutex that guards it.
type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex     // protects span
	span mSpanList // list of spans with free stacks of this order
}
   160  
// Global pool of large stack spans.
// A single lock guards all of the free lists.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}
   166  
   167  func stackinit() {
   168  	if _StackCacheSize&_PageMask != 0 {
   169  		throw("cache size must be a multiple of page size")
   170  	}
   171  	for i := range stackpool {
   172  		stackpool[i].item.span.init()
   173  		lockInit(&stackpool[i].item.mu, lockRankStackpool)
   174  	}
   175  	for i := range stackLarge.free {
   176  		stackLarge.free[i].init()
   177  		lockInit(&stackLarge.lock, lockRankStackLarge)
   178  	}
   179  }
   180  
   181  // stacklog2 returns ⌊log_2(n)⌋.
   182  func stacklog2(n uintptr) int {
   183  	log2 := 0
   184  	for n > 1 {
   185  		n >>= 1
   186  		log2++
   187  	}
   188  	return log2
   189  }
   190  
// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	// We may call mheap_.allocManual below, which takes the heap lock;
	// declare that for lock ranking even on the path that doesn't.
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		// Carve the new span into elemsize-byte stacks and thread
		// them all onto the span's manual free list.
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	// Pop one stack off the span's free list.
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
   230  
// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	// Push x onto the front of the span's free list.
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}
   266  
// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
// stackcacherefill moves stacks of the given order from the global pool
// into c's local cache, leaving the cache holding about half capacity.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		// Push onto the front of the local list being built.
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	// Install the refilled list in the per-P cache.
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
   291  
// stackcacherelease returns stacks of the given order from c's local
// cache to the global pool until the cache is back down to roughly half
// capacity (mirroring stackcacherefill's target).
//
//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	// Whatever remains stays in the local cache.
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
   310  
// stackcache_clear flushes every order's local free list in c back to
// the global pool, leaving the per-P cache empty.
//
//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}
   329  
// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Debug modes: bypass the pools entirely and take pages straight
	// from the OS (stackfree has the matching release path).
	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		// Compute the order: smallest k with fixedStack<<k >= n.
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			// Fast path: pop from the per-P cache, refilling it
			// from the global pool if it is empty.
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		// Large stack: one dedicated span, possibly reused from the
		// large-stack cache populated by stackfree during GC.
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	// Tell the tracer and sanitizers about the new stack memory.
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
   439  
// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		// Only possible if lo/hi are inconsistent (address overflow).
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		// Stack came straight from the OS in stackalloc; return it the
		// same way (or fault-map it to catch use-after-free).
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	// Mirror stackalloc's split: small stacks return to the per-P cache
	// or global pool; large stacks return to the heap or, during GC, to
	// the large-stack cache.
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				// Local cache is full; spill half back to the global pool.
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}
   526  
// maxstacksize is the maximum goroutine stack size. The initializer is
// only a placeholder until runtime.main installs the real limit.
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

// maxstackceiling is an upper bound related to maxstacksize.
// NOTE(review): initialized to match maxstacksize's placeholder here and
// evidently adjusted outside this chunk — confirm against the rest of the file.
var maxstackceiling = maxstacksize
   530  
// ptrnames maps a stack-map bit (0 or 1) to a human-readable name,
// used only by the debug printing in adjustpointers.
var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}
   535  
   536  // Stack frame layout
   537  //
   538  // (x86)
   539  // +------------------+
   540  // | args from caller |
   541  // +------------------+ <- frame->argp
   542  // |  return address  |
   543  // +------------------+
   544  // |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
   545  // +------------------+ <- frame->varp
   546  // |     locals       |
   547  // +------------------+
   548  // |  args to callee  |
   549  // +------------------+ <- frame->sp
   550  //
   551  // (arm)
   552  // +------------------+
   553  // | args from caller |
   554  // +------------------+ <- frame->argp
   555  // | caller's retaddr |
   556  // +------------------+
   557  // |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
   558  // +------------------+ <- frame->varp
   559  // |     locals       |
   560  // +------------------+
   561  // |  args to callee  |
   562  // +------------------+
   563  // |  return address  |
   564  // +------------------+ <- frame->sp
   565  //
   566  // varp > sp means that the function has a frame;
   567  // varp == sp means frameless function.
   568  
// adjustinfo describes a stack move in progress: the old stack's bounds
// and how far the stack moved. It is consulted by the adjust* helpers to
// decide which pointers need rewriting.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase); wraps modulo 2^ptrBits when the stack moves down

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
   576  
   577  // adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   578  // If so, it rewrites *vpp to point into the new stack.
   579  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   580  	pp := (*uintptr)(vpp)
   581  	p := *pp
   582  	if stackDebug >= 4 {
   583  		print("        ", pp, ":", hex(p), "\n")
   584  	}
   585  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   586  		*pp = p + adjinfo.delta
   587  		if stackDebug >= 3 {
   588  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   589  		}
   590  	}
   591  }
   592  
// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8 // bitmap; bit i lives at bit (i%8) of byte (i/8) (LSB-first, see ptrbit)
}
   599  
   600  // ptrbit returns the i'th bit in bv.
   601  // ptrbit is less efficient than iterating directly over bitvector bits,
   602  // and should only be used in non-performance-critical code.
   603  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   604  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   605  	b := *(addb(bv.bytedata, i/8))
   606  	return (b >> (i % 8)) & 1
   607  }
   608  
// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi
	// Walk the bitmap one byte (8 words) at a time.
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			// Visit each set bit: j indexes the lowest set bit,
			// and b &= b-1 clears that bit.
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						// Lost a race with a concurrent channel
						// operation; reread the slot and retry.
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
   658  
// adjustframe rewrites all stack pointers in one frame during a stack copy:
// the saved frame pointer, the frame's locals, its outgoing arguments, and
// any stack objects.
// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved frame pointer if there is one.
	// Per the layout diagram above, argp-varp == 2 words exactly when the
	// frame holds a return address plus a saved frame pointer at varp.
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in the current
		// frame.
		// On ARM64, this is the frame pointer of the caller's caller saved
		// by the caller in its frame (one word below its SP).
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata()
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			// Walk the object's pointer bitmap word by word.
			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}
}
   744  
// adjustctxt fixes up pointers held in gp.sched (the saved scheduler
// context) after a stack move: the closure context pointer and, when
// frame pointers are enabled, the saved base pointer.
func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	// Remember the pre-adjustment bp so the ARM64 special case below can
	// compare it against the saved sp.
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word *below* the SP,
		// which is not copied or adjusted in any frame. Do it explicitly
		// here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}
   770  
   771  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   772  	// Adjust pointers in the Defer structs.
   773  	// We need to do this first because we need to adjust the
   774  	// defer.link fields so we always work on the new stack.
   775  	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   776  	for d := gp._defer; d != nil; d = d.link {
   777  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   778  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   779  		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   780  	}
   781  }
   782  
// adjustpanics fixes up gp's panic-record list head after a stack move.
func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}
   788  
   789  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   790  	// the data elements pointed to by a SudoG structure
   791  	// might be in the stack.
   792  	for s := gp.waiting; s != nil; s = s.waitlink {
   793  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   794  	}
   795  }
   796  
   797  func fillstack(stk stack, b byte) {
   798  	for p := stk.lo; p < stk.hi; p++ {
   799  		*(*byte)(unsafe.Pointer(p)) = b
   800  	}
   801  }
   802  
// findsghi returns the highest end address of any sudog element slot
// (sg.elem + element size) that lies within stack stk, or 0 if none do.
// The result seeds adjustinfo.sghi.
func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		// p is one past the end of this sudog's element slot.
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
   813  
// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// The wait list is sorted by channel (consecutive sudogs on the same
	// channel share a lock), so skip a channel we just locked.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, since a deadlock can't
			// happen because the G involved must already be
			// suspended. So, we get a special hchan lock rank here
			// that is lower than gscan, but doesn't allow acquiring
			// any other locks other than hchan.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}
   865  
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		// A syscall may hold pointers into the stack that we cannot
		// find or adjust, so moving the stack is forbidden here.
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	// Bytes in use on the old stack: everything from the saved SP
	// up to the high end of the stack.
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		// Debug aid: poison the fresh stack so stray reads of
		// not-yet-copied memory are recognizable.
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	// delta is added to every pointer into the old stack to obtain
	// the corresponding pointer into the new stack; stacks are
	// aligned at their high ends, so it is the hi-to-hi difference.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	// free old stack
	if stackPoisonCopy != 0 {
		// Poison the old stack so late readers fault recognizably.
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
   954  
   955  // round x up to a power of 2.
   956  func round2(x int32) int32 {
   957  	s := uint(0)
   958  	for 1<<s < x {
   959  		s++
   960  	}
   961  	return 1 << s
   962  }
   963  
// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// throwsplit means stack growth is forbidden here; report
		// where we were and die.
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // Include runtime frames
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	// Save morebuf locally, then clear the m's copy.
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		// Report whichever limit is the binding one.
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
  1133  
// nilfunc is used as the callee PC when a goroutine is started with a
// nil funcval (see gostartcallfn). If it is ever actually executed,
// the nil store faults.
//
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}
  1138  
  1139  // adjust Gobuf as if it executed a call to fn
  1140  // and then stopped before the first instruction in fn.
  1141  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1142  	var fn unsafe.Pointer
  1143  	if fv != nil {
  1144  		fn = unsafe.Pointer(fv.fn)
  1145  	} else {
  1146  		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
  1147  	}
  1148  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1149  }
  1150  
  1151  // isShrinkStackSafe returns whether it's safe to attempt to shrink
  1152  // gp's stack. Shrinking the stack is only safe when we have precise
  1153  // pointer maps for all frames on the stack. The caller must hold the
  1154  // _Gscan bit for gp or must be running gp itself.
  1155  func isShrinkStackSafe(gp *g) bool {
  1156  	// We can't copy the stack if we're in a syscall.
  1157  	// The syscall might have pointers into the stack and
  1158  	// often we don't have precise pointer maps for the innermost
  1159  	// frames.
  1160  	if gp.syscallsp != 0 {
  1161  		return false
  1162  	}
  1163  	// We also can't copy the stack if we're at an asynchronous
  1164  	// safe-point because we don't have precise pointer maps for
  1165  	// all frames.
  1166  	if gp.asyncSafePoint {
  1167  		return false
  1168  	}
  1169  	// We also can't *shrink* the stack in the window between the
  1170  	// goroutine calling gopark to park on a channel and
  1171  	// gp.activeStackChans being set.
  1172  	if gp.parkingOnChan.Load() {
  1173  		return false
  1174  	}
  1175  	// We also can't copy the stack while tracing is enabled, and
  1176  	// gp is in _Gwaiting solely to make itself available to the GC.
  1177  	// In these cases, the G is actually executing on the system
  1178  	// stack, and the execution tracer may want to take a stack trace
  1179  	// of the G's stack. Note: it's safe to access gp.waitreason here.
  1180  	// We're only checking if this is true if we took ownership of the
  1181  	// G with the _Gscan bit. This prevents the goroutine from transitioning,
  1182  	// which prevents gp.waitreason from changing.
  1183  	if traceEnabled() && readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForGC() {
  1184  		return false
  1185  	}
  1186  	return true
  1187  }
  1188  
// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	// Shrinking disabled via GODEBUG=gcshrinkstackoff.
	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	// Candidate new size: half the current allocation.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
  1249  
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			// Capture next before s may be removed from the list.
			next := s.next
			if s.allocCount == 0 {
				// No stacks are allocated from this span; return
				// it to the heap.
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}
  1282  
// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32  // size of the object in bytes
	_ptrdata  int32  // ptrdata, or -ptrdata if a GC prog is used
	gcdataoff uint32 // offset to gcdata from moduledata.rodata
}
  1294  
// useGCProg reports whether the record's pointer information is
// encoded as a GC program rather than a plain pointer bitmap.
// The sign of _ptrdata carries the flag: negative means GC program.
func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}
  1298  
  1299  func (r *stackObjectRecord) ptrdata() uintptr {
  1300  	x := r._ptrdata
  1301  	if x < 0 {
  1302  		return uintptr(-x)
  1303  	}
  1304  	return uintptr(x)
  1305  }
  1306  
// gcdata returns pointer map or GC prog of the type.
func (r *stackObjectRecord) gcdata() *byte {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	// Find the module whose address range contains this record;
	// gcdataoff is an offset into that module's rodata.
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return (*byte)(unsafe.Pointer(res))
}
  1323  
// morestackc reports a fatal error: code that must run on the system
// stack was called on a user goroutine stack.
//
// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}
  1331  
// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack
  1337  
  1338  func gcComputeStartingStackSize() {
  1339  	if debug.adaptivestackstart == 0 {
  1340  		return
  1341  	}
  1342  	// For details, see the design doc at
  1343  	// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
  1344  	// The basic algorithm is to track the average size of stacks
  1345  	// and start goroutines with stack equal to that average size.
  1346  	// Starting at the average size uses at most 2x the space that
  1347  	// an ideal algorithm would have used.
  1348  	// This is just a heuristic to avoid excessive stack growth work
  1349  	// early in a goroutine's lifetime. See issue 18138. Stacks that
  1350  	// are allocated too small can still grow, and stacks allocated
  1351  	// too large can still shrink.
  1352  	var scannedStackSize uint64
  1353  	var scannedStacks uint64
  1354  	for _, p := range allp {
  1355  		scannedStackSize += p.scannedStackSize
  1356  		scannedStacks += p.scannedStacks
  1357  		// Reset for next time
  1358  		p.scannedStackSize = 0
  1359  		p.scannedStacks = 0
  1360  	}
  1361  	if scannedStacks == 0 {
  1362  		startingStackSize = fixedStack
  1363  		return
  1364  	}
  1365  	avg := scannedStackSize/scannedStacks + stackGuard
  1366  	// Note: we add stackGuard to ensure that a goroutine that
  1367  	// uses the average space will not trigger a growth.
  1368  	if avg > uint64(maxstacksize) {
  1369  		avg = uint64(maxstacksize)
  1370  	}
  1371  	if avg < fixedStack {
  1372  		avg = fixedStack
  1373  	}
  1374  	// Note: maxstacksize fits in 30 bits, so avg also does.
  1375  	startingStackSize = uint32(round2(int32(avg)))
  1376  }
  1377  

View as plain text