Source file src/runtime/mcleanup.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/cpu"
    10  	"internal/goarch"
    11  	"internal/runtime/atomic"
    12  	"internal/runtime/math"
    13  	"internal/runtime/sys"
    14  	"unsafe"
    15  )
    16  
    17  // AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
    18  // reachable, the runtime will call cleanup(arg) in a separate goroutine.
    19  //
    20  // A typical use is that ptr is an object wrapping an underlying resource (e.g.,
    21  // a File object wrapping an OS file descriptor), arg is the underlying resource
    22  // (e.g., the OS file descriptor), and the cleanup function releases the underlying
    23  // resource (e.g., by calling the close system call).
    24  //
    25  // There are few constraints on ptr. In particular, multiple cleanups may be
    26  // attached to the same pointer, or to different pointers within the same
    27  // allocation.
    28  //
    29  // If ptr is reachable from cleanup or arg, ptr will never be collected
    30  // and the cleanup will never run. As a protection against simple cases of this,
    31  // AddCleanup panics if arg is equal to ptr.
    32  //
    33  // There is no specified order in which cleanups will run.
    34  // In particular, if several objects point to each other and all become
    35  // unreachable at the same time, their cleanups all become eligible to run
    36  // and can run in any order. This is true even if the objects form a cycle.
    37  //
    38  // Cleanups run concurrently with any user-created goroutines.
    39  // Cleanups may also run concurrently with one another (unlike finalizers).
    40  // If a cleanup function must run for a long time, it should create a new goroutine
    41  // to avoid blocking the execution of other cleanups.
    42  //
    43  // If ptr has both a cleanup and a finalizer, the cleanup will only run once
    44  // it has been finalized and becomes unreachable without an associated finalizer.
    45  //
    46  // The cleanup(arg) call is not always guaranteed to run; in particular it is not
    47  // guaranteed to run before program exit.
    48  //
    49  // Cleanups are not guaranteed to run if the size of T is zero bytes, because
    50  // it may share the same address with other zero-size objects in memory. See
    51  // https://go.dev/ref/spec#Size_and_alignment_guarantees.
    52  //
    53  // It is not guaranteed that a cleanup will run for objects allocated
    54  // in initializers for package-level variables. Such objects may be
    55  // linker-allocated, not heap-allocated.
    56  //
    57  // Note that because cleanups may execute arbitrarily far into the future
    58  // after an object is no longer referenced, the runtime is allowed to perform
    59  // a space-saving optimization that batches objects together in a single
    60  // allocation slot. The cleanup for an unreferenced object in such an
    61  // allocation may never run if it always exists in the same batch as a
    62  // referenced object. Typically, this batching only happens for tiny
    63  // (on the order of 16 bytes or less) and pointer-free objects.
    64  //
    65  // A cleanup may run as soon as an object becomes unreachable.
    66  // In order to use cleanups correctly, the program must ensure that
    67  // the object is reachable until it is safe to run its cleanup.
    68  // Objects stored in global variables, or that can be found by tracing
    69  // pointers from a global variable, are reachable. A function argument or
    70  // receiver may become unreachable at the last point where the function
    71  // mentions it. To ensure a cleanup does not get called prematurely,
    72  // pass the object to the [KeepAlive] function after the last point
    73  // where the object must remain reachable.
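        //
        // A minimal usage sketch (Unix-flavored; the wrapper, newWrapper, and
        // releaseFD names below are illustrative, and errors are ignored for brevity):
        //
        //	package fdwrap
        //
        //	import (
        //		"runtime"
        //		"syscall"
        //	)
        //
        //	// wrapper owns an OS file descriptor.
        //	type wrapper struct{ fd int }
        //
        //	func newWrapper(fd int) *wrapper {
        //		w := &wrapper{fd: fd}
        //		// Pass the descriptor itself as arg. Passing w would keep w
        //		// reachable from the cleanup, so the cleanup would never run.
        //		runtime.AddCleanup(w, releaseFD, fd)
        //		return w
        //	}
        //
        //	// releaseFD closes the descriptor once the wrapper is unreachable.
        //	func releaseFD(fd int) { syscall.Close(fd) }
        //
        //	func (w *wrapper) write(p []byte) (int, error) {
        //		n, err := syscall.Write(w.fd, p)
        //		// Keep w reachable until the write on its descriptor returns,
        //		// so the cleanup cannot close fd while the call is in flight.
        //		runtime.KeepAlive(w)
        //		return n, err
        //	}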
    74  func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup {
    75  	// Explicitly force ptr and cleanup to escape to the heap.
    76  	ptr = abi.Escape(ptr)
    77  	cleanup = abi.Escape(cleanup)
    78  
    79  	// The pointer to the object must be valid.
    80  	if ptr == nil {
    81  		panic("runtime.AddCleanup: ptr is nil")
    82  	}
    83  	usptr := uintptr(unsafe.Pointer(ptr))
    84  
    85  	// Check that arg is not equal to ptr.
    86  	argType := abi.TypeOf(arg)
    87  	if kind := argType.Kind(); kind == abi.Pointer || kind == abi.UnsafePointer {
    88  		if unsafe.Pointer(ptr) == *((*unsafe.Pointer)(unsafe.Pointer(&arg))) {
    89  			panic("runtime.AddCleanup: ptr is equal to arg, cleanup will never run")
    90  		}
    91  	}
    92  	if inUserArenaChunk(usptr) {
    93  		// Arena-allocated objects are not eligible for cleanup.
    94  		panic("runtime.AddCleanup: ptr is arena-allocated")
    95  	}
    96  	if debug.sbrk != 0 {
    97  		// debug.sbrk never frees memory, so no cleanup will ever run
    98  		// (and we don't have the data structures to record them).
    99  		// Return a noop cleanup.
   100  		return Cleanup{}
   101  	}
   102  
   103  	// Create new storage for the argument.
   104  	var argv *S
   105  	if size := unsafe.Sizeof(arg); size < maxTinySize && argType.PtrBytes == 0 {
   106  		// Side-step the tiny allocator to avoid liveness issues, since this box
   107  		// will be treated like a root by the GC. We model the box as an array of
   108  		// uintptrs to guarantee maximum allocator alignment.
   109  		//
   110  		// TODO(mknyszek): Consider just making space in cleanupFn for this. The
   111  		// unfortunate part of this is it would grow specialCleanup by 16 bytes, so
   112  		// while there wouldn't be an allocation, *every* cleanup would take the
   113  		// memory overhead hit.
   114  		box := new([maxTinySize / goarch.PtrSize]uintptr)
   115  		argv = (*S)(unsafe.Pointer(box))
   116  	} else {
   117  		argv = new(S)
   118  	}
   119  	*argv = arg
   120  
   121  	// Find the containing object.
   122  	base, _, _ := findObject(usptr, 0, 0)
   123  	if base == 0 {
   124  		if isGoPointerWithoutSpan(unsafe.Pointer(ptr)) {
   125  			// Cleanup is a noop.
   126  			return Cleanup{}
   127  		}
   128  		panic("runtime.AddCleanup: ptr not in allocated block")
   129  	}
   130  
   131  	// Create another G if necessary.
   132  	if gcCleanups.needG() {
   133  		gcCleanups.createGs()
   134  	}
   135  
   136  	id := addCleanup(unsafe.Pointer(ptr), cleanupFn{
   137  		// Instantiate a caller function to call the cleanup, that is cleanup(*argv).
   138  		//
   139  		// TODO(mknyszek): This allocates because the generic dictionary argument
   140  		// gets closed over, but callCleanup doesn't even use the dictionary argument,
   141  		// so theoretically that could be removed, eliminating an allocation.
   142  		call: callCleanup[S],
   143  		fn:   *(**funcval)(unsafe.Pointer(&cleanup)),
   144  		arg:  unsafe.Pointer(argv),
   145  	})
   146  	if debug.checkfinalizers != 0 {
   147  		cleanupFn := *(**funcval)(unsafe.Pointer(&cleanup))
   148  		setCleanupContext(unsafe.Pointer(ptr), abi.TypeFor[T](), sys.GetCallerPC(), cleanupFn.fn, id)
   149  	}
   150  	return Cleanup{
   151  		id:  id,
   152  		ptr: usptr,
   153  	}
   154  }
   155  
   156  // callCleanup is a helper for calling cleanups in a polymorphic way.
   157  //
   158  // In practice, all it does is call fn(*arg). arg must be a *T.
   159  //
   160  //go:noinline
   161  func callCleanup[T any](fn *funcval, arg unsafe.Pointer) {
   162  	cleanup := *(*func(T))(unsafe.Pointer(&fn))
   163  	cleanup(*(*T)(arg))
   164  }
   165  
   166  // Cleanup is a handle to a cleanup call for a specific object.
   167  type Cleanup struct {
   168  	// id is the unique identifier for the cleanup.
   169  	id uint64
   170  	// ptr contains the pointer to the object.
   171  	ptr uintptr
   172  }
   173  
   174  // Stop cancels the cleanup call. Stop will have no effect if the cleanup call
   175  // has already been queued for execution (because ptr became unreachable).
   176  // To guarantee that Stop removes the cleanup function, the caller must ensure
   177  // that the pointer that was passed to AddCleanup is reachable across the call to Stop.
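        //
        // A brief sketch of the intended pattern (hypothetical names file, newFile,
        // and Close; "runtime" and "syscall" imports assumed): retain the returned
        // Cleanup and call Stop once the resource has been released explicitly, so
        // the cleanup cannot run afterwards and double-close it.
        //
        //	type file struct {
        //		fd      int
        //		cleanup runtime.Cleanup
        //	}
        //
        //	func newFile(fd int) *file {
        //		f := &file{fd: fd}
        //		f.cleanup = runtime.AddCleanup(f, func(fd int) { syscall.Close(fd) }, fd)
        //		return f
        //	}
        //
        //	func (f *file) Close() error {
        //		// The explicit close makes the cleanup unnecessary; cancel it.
        //		f.cleanup.Stop()
        //		err := syscall.Close(f.fd)
        //		// f is still referenced below, so it stays reachable across the
        //		// Stop call above, as required.
        //		runtime.KeepAlive(f)
        //		return err
        //	}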
   178  func (c Cleanup) Stop() {
   179  	if c.id == 0 {
   180  		// id is set to zero when the cleanup is a noop.
   181  		return
   182  	}
   183  
   184  	// The following block removes the Special record of type cleanup for the object c.ptr.
   185  	span := spanOfHeap(c.ptr)
   186  	if span == nil {
   187  		return
   188  	}
   189  	// Ensure that the span is swept.
   190  	// Sweeping accesses the specials list w/o locks, so we have
   191  	// to synchronize with it. And it's just much safer.
   192  	mp := acquirem()
   193  	span.ensureSwept()
   194  
   195  	offset := c.ptr - span.base()
   196  
   197  	var found *special
   198  	lock(&span.speciallock)
   199  
   200  	iter, exists := span.specialFindSplicePoint(offset, _KindSpecialCleanup)
   201  	if exists {
   202  		for {
   203  			s := *iter
   204  			if s == nil {
   205  				// Reached the end of the linked list. Stop searching at this point.
   206  				break
   207  			}
   208  			if offset == s.offset && _KindSpecialCleanup == s.kind &&
   209  				(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
   210  				// The special is a cleanup and contains a matching cleanup id.
   211  				*iter = s.next
   212  				found = s
   213  				break
   214  			}
   215  			if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
   216  				// The special is outside the region specified for that kind of
   217  				// special. The specials are sorted by kind.
   218  				break
   219  			}
   220  			// Try the next special.
   221  			iter = &s.next
   222  		}
   223  	}
   224  	if span.specials == nil {
   225  		spanHasNoSpecials(span)
   226  	}
   227  	unlock(&span.speciallock)
   228  	releasem(mp)
   229  
   230  	if found == nil {
   231  		return
   232  	}
   233  	lock(&mheap_.speciallock)
   234  	mheap_.specialCleanupAlloc.free(unsafe.Pointer(found))
   235  	unlock(&mheap_.speciallock)
   236  
   237  	if debug.checkfinalizers != 0 {
   238  		clearCleanupContext(c.ptr, c.id)
   239  	}
   240  }
   241  
   242  const cleanupBlockSize = 512
   243  
   244  // cleanupBlock is a block of cleanups to be executed.
   245  //
   246  // cleanupBlock is allocated from non-GC'd memory, so any heap pointers
   247  // must be specially handled. The GC and cleanup queue currently assume
   248  // that the cleanup queue does not grow during marking (but it can shrink).
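        //
        // As a rough capacity sketch, assuming a 64-bit platform where lfnode is 16
        // bytes (so cleanupBlockHeader rounds up to 32 bytes) and cleanupFn is three
        // 8-byte words:
        //
        //	len(cleanups) = (cleanupBlockSize - sizeof(cleanupBlockHeader)) / sizeof(cleanupFn)
        //	              = (512 - 32) / 24
        //	              = 20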
   249  type cleanupBlock struct {
   250  	cleanupBlockHeader
   251  	cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / unsafe.Sizeof(cleanupFn{})]cleanupFn
   252  }
   253  
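        // cleanupFnPtrMask is a pointer bitmask for cleanupFn: all three of its
        // words (call, fn, and arg) hold pointers, hence 0b111.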
   254  var cleanupFnPtrMask = [...]uint8{0b111}
   255  
   256  // cleanupFn represents a cleanup function with its argument, yet to be called.
   257  type cleanupFn struct {
   258  	// call is an adapter function that understands how to safely call fn(*arg).
   259  	call func(*funcval, unsafe.Pointer)
   260  	fn   *funcval       // cleanup function passed to AddCleanup.
   261  	arg  unsafe.Pointer // pointer to argument to pass to cleanup function.
   262  }
   263  
   264  var cleanupBlockPtrMask [cleanupBlockSize / goarch.PtrSize / 8]byte
   265  
   266  type cleanupBlockHeader struct {
   267  	_ sys.NotInHeap
   268  	lfnode
   269  	alllink *cleanupBlock
   270  
   271  	// n is sometimes accessed atomically.
   272  	//
   273  	// The invariant depends on what phase the garbage collector is in.
   274  	// During the sweep phase (gcphase == _GCoff), each block has exactly
   275  	// one owner, so it's always safe to update this without atomics.
   276  	// But if this *could* be updated during the mark phase, it must be
   277  	// updated atomically to synchronize with the garbage collector
   278  	// scanning the block as a root.
   279  	n uint32
   280  }
   281  
   282  // enqueue pushes a single cleanup function into the block.
   283  //
   284  // Reports whether this enqueue call filled the block. This is odd,
   285  // but we want to flush full blocks eagerly to get cleanups
   286  // running as soon as possible.
   287  //
   288  // Must only be called if the GC is in the sweep phase (gcphase == _GCoff),
   289  // because it does not synchronize with the garbage collector.
   290  func (b *cleanupBlock) enqueue(c cleanupFn) bool {
   291  	b.cleanups[b.n] = c
   292  	b.n++
   293  	return b.full()
   294  }
   295  
   296  // full returns true if the cleanup block is full.
   297  func (b *cleanupBlock) full() bool {
   298  	return b.n == uint32(len(b.cleanups))
   299  }
   300  
   301  // empty returns true if the cleanup block is empty.
   302  func (b *cleanupBlock) empty() bool {
   303  	return b.n == 0
   304  }
   305  
   306  // take moves as many cleanups as possible from b into a.
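        //
        // For example, if a has 5 free slots and b holds 8 cleanups, take copies
        // b's last 5 entries (b.cleanups[3:8]) into a, leaving a full and b with
        // b.n == 3.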
   307  func (a *cleanupBlock) take(b *cleanupBlock) {
   308  	dst := a.cleanups[a.n:]
   309  	if uint32(len(dst)) >= b.n {
   310  		// Take all.
   311  		copy(dst, b.cleanups[:])
   312  		a.n += b.n
   313  		b.n = 0
   314  	} else {
   315  		// Partial take. Copy from the tail to avoid having
   316  		// to move more memory around.
   317  		copy(dst, b.cleanups[b.n-uint32(len(dst)):b.n])
   318  		a.n = uint32(len(a.cleanups))
   319  		b.n -= uint32(len(dst))
   320  	}
   321  }
   322  
   323  // cleanupQueue is a queue of ready-to-run cleanup functions.
   324  type cleanupQueue struct {
   325  	// Stack of full cleanup blocks.
   326  	full      lfstack
   327  	workUnits atomic.Uint64 // length of full; decrement before pop from full, increment after push to full
   328  	_         [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - unsafe.Sizeof(atomic.Uint64{})]byte
   329  
   330  	// Stack of free cleanup blocks.
   331  	free lfstack
   332  
   333  	// flushed indicates whether all local cleanupBlocks have been
   334  	// flushed, and we're in a period of time where this condition is
   335  	// stable (after the last sweeper, before the next sweep phase
   336  	// begins).
   337  	flushed atomic.Bool // Next to free because frequently accessed together.
   338  
   339  	_ [cpu.CacheLinePadSize - unsafe.Sizeof(lfstack(0)) - 1]byte
   340  
   341  	// Linked list of all cleanup blocks.
   342  	all atomic.UnsafePointer // *cleanupBlock
   343  	_   [cpu.CacheLinePadSize - unsafe.Sizeof(atomic.UnsafePointer{})]byte
   344  
   345  	// Goroutine block state.
   346  	lock mutex
   347  
   348  	// sleeping is the list of sleeping cleanup goroutines.
   349  	//
   350  	// Protected by lock.
   351  	sleeping gList
   352  
   353  	// asleep is the number of cleanup goroutines sleeping.
   354  	//
   355  	// Read without lock, written only with the lock held.
   356  	// When the lock is held, the lock holder may only observe
   357  	// asleep.Load() == sleeping.n.
   358  	//
   359  	// To make reading without the lock safe as a signal to wake up
   360  	// a goroutine and handle new work, it must always be greater
   361  	// than or equal to sleeping.n. In the periods of time that it
   362  	// is strictly greater, it may cause spurious calls to wake.
   363  	asleep atomic.Uint32
   364  
   365  	// running indicates the number of cleanup goroutines actively
   366  	// executing user cleanup functions at any point in time.
   367  	//
   368  	// Read and written to without lock.
   369  	running atomic.Uint32
   370  
   371  	// ng is the number of cleanup goroutines.
   372  	//
   373  	// Read without lock, written only with lock held.
   374  	ng atomic.Uint32
   375  
   376  	// needg is the number of new cleanup goroutines that
   377  	// need to be created.
   378  	//
   379  	// Read without lock, written only with lock held.
   380  	needg atomic.Uint32
   381  
   382  	// Cleanup queue stats.
   383  
   384  	// queued represents a monotonic count of queued cleanups. This is sharded across
   385  	// Ps via the field cleanupsQueued in each p, so reading just this value is insufficient.
   386  	// In practice, this value only includes the queued count of dead Ps.
   387  	//
   388  	// Writes are protected by STW.
   389  	queued uint64
   390  
   391  	// executed is a monotonic count of executed cleanups.
   392  	//
   393  	// Read and updated atomically.
   394  	executed atomic.Uint64
   395  }
   396  
   397  // addWork indicates that n units of parallelizable work have been added to the queue.
   398  func (q *cleanupQueue) addWork(n int) {
   399  	q.workUnits.Add(int64(n))
   400  }
   401  
   402  // tryTakeWork is an attempt to dequeue some work by a cleanup goroutine.
   403  // This might fail if there's no work to do.
   404  func (q *cleanupQueue) tryTakeWork() bool {
   405  	for {
   406  		wu := q.workUnits.Load()
   407  		if wu == 0 {
   408  			return false
   409  		}
   410  		// CAS to prevent us from going negative.
   411  		if q.workUnits.CompareAndSwap(wu, wu-1) {
   412  			return true
   413  		}
   414  	}
   415  }
   416  
   417  // enqueue queues a single cleanup for execution.
   418  //
   419  // Called by the sweeper, and only the sweeper.
   420  func (q *cleanupQueue) enqueue(c cleanupFn) {
   421  	mp := acquirem()
   422  	pp := mp.p.ptr()
   423  	b := pp.cleanups
   424  	if b == nil {
   425  		if q.flushed.Load() {
   426  			q.flushed.Store(false)
   427  		}
   428  		b = (*cleanupBlock)(q.free.pop())
   429  		if b == nil {
   430  			b = (*cleanupBlock)(persistentalloc(cleanupBlockSize, tagAlign, &memstats.gcMiscSys))
   431  			for {
   432  				next := (*cleanupBlock)(q.all.Load())
   433  				b.alllink = next
   434  				if q.all.CompareAndSwap(unsafe.Pointer(next), unsafe.Pointer(b)) {
   435  					break
   436  				}
   437  			}
   438  		}
   439  		pp.cleanups = b
   440  	}
   441  	if full := b.enqueue(c); full {
   442  		q.full.push(&b.lfnode)
   443  		pp.cleanups = nil
   444  		q.addWork(1)
   445  	}
   446  	pp.cleanupsQueued++
   447  	releasem(mp)
   448  }
   449  
   450  // dequeue pops a block of cleanups from the queue. Blocks until one is available
   451  // and never returns nil.
   452  func (q *cleanupQueue) dequeue() *cleanupBlock {
   453  	for {
   454  		if q.tryTakeWork() {
   455  			// Guaranteed to be non-nil.
   456  			return (*cleanupBlock)(q.full.pop())
   457  		}
   458  		lock(&q.lock)
   459  		// Increment asleep first. We may have to undo this if we abort the sleep.
   460  		// We must update asleep first because the scheduler might not try to wake
   461  		// us up when work comes in between the last check of workUnits and when we
   462  		// go to sleep. (It may see asleep as 0.) By incrementing it here, we guarantee
   463  		// after this point that if new work comes in, someone will try to grab the
   464  		// lock and wake us. However, this also means that if we back out, we may cause
   465  		// someone to spuriously grab the lock and try to wake us up, only to fail.
   466  		// This should be very rare because the window here is incredibly small: the
   467  		// window between now and when we decrement q.asleep below.
   468  		q.asleep.Add(1)
   469  
   470  		// Re-check workUnits under the lock and with asleep updated. If it's still zero,
   471  		// then no new work came in, and it's safe for us to go to sleep. If new work
   472  		// comes in after this point, then the scheduler will notice that we're sleeping
   473  		// and wake us up.
   474  		if q.workUnits.Load() > 0 {
   475  			// Undo the q.asleep update and try to take work again.
   476  			q.asleep.Add(-1)
   477  			unlock(&q.lock)
   478  			continue
   479  		}
   480  		q.sleeping.push(getg())
   481  		goparkunlock(&q.lock, waitReasonCleanupWait, traceBlockSystemGoroutine, 1)
   482  	}
   483  }
   484  
   485  // flush pushes all active cleanup blocks to the full list and wakes up cleanup
   486  // goroutines to handle them.
   487  //
   488  // Must only be called at a point when we can guarantee that no more cleanups
   489  // are being queued, such as after the final sweeper for the cycle is done
   490  // but before the next mark phase.
   491  func (q *cleanupQueue) flush() {
   492  	mp := acquirem()
   493  	flushed := 0
   494  	emptied := 0
   495  	missing := 0
   496  
   497  	// Coalesce the partially-filled blocks to present a more accurate picture of demand.
   498  	// We use the number of coalesced blocks to process as a signal for demand to create
   499  	// new cleanup goroutines.
   500  	var cb *cleanupBlock
   501  	for _, pp := range allp {
   502  		if pp == nil {
   503  			// This function is reachable via mallocgc in the
   504  			// middle of procresize, when allp has been resized,
   505  			// but the new Ps not allocated yet.
   506  			missing++
   507  			continue
   508  		}
   509  		b := pp.cleanups
   510  		if b == nil {
   511  			missing++
   512  			continue
   513  		}
   514  		pp.cleanups = nil
   515  		if cb == nil {
   516  			cb = b
   517  			continue
   518  		}
   519  		// N.B. After take, either cb is full, b is empty, or both.
   520  		cb.take(b)
   521  		if cb.full() {
   522  			q.full.push(&cb.lfnode)
   523  			flushed++
   524  			cb = b
   525  			b = nil
   526  		}
   527  		if b != nil && b.empty() {
   528  			q.free.push(&b.lfnode)
   529  			emptied++
   530  		}
   531  	}
   532  	if cb != nil {
   533  		q.full.push(&cb.lfnode)
   534  		flushed++
   535  	}
   536  	if flushed != 0 {
   537  		q.addWork(flushed)
   538  	}
   539  	if flushed+emptied+missing != len(allp) {
   540  		throw("failed to correctly flush all P-owned cleanup blocks")
   541  	}
   542  	q.flushed.Store(true)
   543  	releasem(mp)
   544  }
   545  
   546  // needsWake returns true if cleanup goroutines may need to be awoken or created to handle cleanup load.
   547  func (q *cleanupQueue) needsWake() bool {
   548  	return q.workUnits.Load() > 0 && (q.asleep.Load() > 0 || q.ng.Load() < maxCleanupGs())
   549  }
   550  
   551  // wake wakes up one or more goroutines to process the cleanup queue. If there aren't
   552  // enough sleeping goroutines to handle the demand, wake will arrange for new goroutines
   553  // to be created.
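        //
        // For example (illustrative numbers): with workUnits = 5 and asleep = 2,
        // wake wakes both sleeping goroutines and requests up to 3 extra ones
        // (capped by maxCleanupGs); with workUnits = 1 and asleep = 2, it wakes
        // just one goroutine and creates none.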
   554  func (q *cleanupQueue) wake() {
   555  	lock(&q.lock)
   556  
   557  	// Figure out how many goroutines to wake, and how many extra goroutines to create.
   558  	// Wake one goroutine for each work unit.
   559  	var wake, extra uint32
   560  	work := q.workUnits.Load()
   561  	asleep := uint64(q.asleep.Load())
   562  	if work > asleep {
   563  		wake = uint32(asleep)
   564  		if work > uint64(math.MaxUint32) {
   565  			// Protect against overflow.
   566  			extra = math.MaxUint32
   567  		} else {
   568  			extra = uint32(work - asleep)
   569  		}
   570  	} else {
   571  		wake = uint32(work)
   572  		extra = 0
   573  	}
   574  	if extra != 0 {
   575  		// Signal that we should create new goroutines, one for each extra work unit,
   576  		// up to maxCleanupGs.
   577  		newg := min(extra, maxCleanupGs()-q.ng.Load())
   578  		if newg > 0 {
   579  			q.needg.Add(int32(newg))
   580  		}
   581  	}
   582  	if wake == 0 {
   583  		// Nothing to do.
   584  		unlock(&q.lock)
   585  		return
   586  	}
   587  
   588  	// Take ownership of waking 'wake' goroutines.
   589  	//
   590  	// Nobody else will wake up these goroutines, so they're guaranteed
   591  	// to be sitting on q.sleeping, waiting for us to wake them.
   592  	q.asleep.Add(-int32(wake))
   593  
   594  	// Collect them and schedule them.
   595  	var list gList
   596  	for range wake {
   597  		list.push(q.sleeping.pop())
   598  	}
   599  	unlock(&q.lock)
   600  
   601  	injectglist(&list)
   602  	return
   603  }
   604  
   605  func (q *cleanupQueue) needG() bool {
   606  	have := q.ng.Load()
   607  	if have >= maxCleanupGs() {
   608  		return false
   609  	}
   610  	if have == 0 {
   611  		// Make sure we have at least one.
   612  		return true
   613  	}
   614  	return q.needg.Load() > 0
   615  }
   616  
   617  func (q *cleanupQueue) createGs() {
   618  	lock(&q.lock)
   619  	have := q.ng.Load()
   620  	need := min(q.needg.Swap(0), maxCleanupGs()-have)
   621  	if have == 0 && need == 0 {
   622  		// Make sure we have at least one.
   623  		need = 1
   624  	}
   625  	if need > 0 {
   626  		q.ng.Add(int32(need))
   627  	}
   628  	unlock(&q.lock)
   629  
   630  	for range need {
   631  		go runCleanups()
   632  	}
   633  }
   634  
   635  func (q *cleanupQueue) beginRunningCleanups() {
   636  	// Update runningCleanups and running atomically with respect
   637  	// to goroutine profiles by disabling preemption.
   638  	mp := acquirem()
   639  	getg().runningCleanups.Store(true)
   640  	q.running.Add(1)
   641  	releasem(mp)
   642  }
   643  
   644  func (q *cleanupQueue) endRunningCleanups() {
   645  	// Update runningCleanups and running atomically with respect
   646  	// to goroutine profiles by disabling preemption.
   647  	mp := acquirem()
   648  	getg().runningCleanups.Store(false)
   649  	q.running.Add(-1)
   650  	releasem(mp)
   651  }
   652  
   653  func (q *cleanupQueue) readQueueStats() (queued, executed uint64) {
   654  	executed = q.executed.Load()
   655  	queued = q.queued
   656  
   657  	// N.B. This is inconsistent, but that's intentional. It's just an estimate.
   658  	// Read this _after_ reading executed to decrease the chance that we observe
   659  	// an inconsistency in the statistics (executed > queued).
   660  	for _, pp := range allp {
   661  		queued += pp.cleanupsQueued
   662  	}
   663  	return
   664  }
   665  
   666  func maxCleanupGs() uint32 {
   667  	// N.B. Left as a function to make changing the policy easier.
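        	// For example, GOMAXPROCS = 8 yields 2 cleanup goroutines, while any
        	// smaller value yields the minimum of 1.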
   668  	return uint32(max(gomaxprocs/4, 1))
   669  }
   670  
   671  // gcCleanups is the global cleanup queue.
   672  var gcCleanups cleanupQueue
   673  
   674  // runCleanups is the entrypoint for all cleanup-running goroutines.
   675  func runCleanups() {
   676  	for {
   677  		b := gcCleanups.dequeue()
   678  		if raceenabled {
   679  			// Approximately: adds a happens-before edge between the cleanup
   680  			// argument being mutated and the call to the cleanup below.
   681  			racefingo()
   682  		}
   683  
   684  		gcCleanups.beginRunningCleanups()
   685  		for i := 0; i < int(b.n); i++ {
   686  			c := b.cleanups[i]
   687  			b.cleanups[i] = cleanupFn{}
   688  
   689  			var racectx uintptr
   690  			if raceenabled {
   691  				// Enter a new race context so the race detector can catch
   692  				// potential races between cleanups, even if they execute on
   693  				// the same goroutine.
   694  				//
   695  				// Synchronize on arg. This would fail to find races on the
   696  				// closed-over values in fn (suppose arg is passed to multiple
   697  				// AddCleanup calls) if arg was not unique, but it is.
   698  				racerelease(unsafe.Pointer(c.arg))
   699  				racectx = raceEnterNewCtx()
   700  				raceacquire(unsafe.Pointer(c.arg))
   701  			}
   702  
   703  			// Execute the next cleanup.
   704  			c.call(c.fn, c.arg)
   705  
   706  			if raceenabled {
   707  				// Restore the old context.
   708  				raceRestoreCtx(racectx)
   709  			}
   710  		}
   711  		gcCleanups.endRunningCleanups()
   712  		gcCleanups.executed.Add(int64(b.n))
   713  
   714  		atomic.Store(&b.n, 0) // Synchronize with markroot. See comment in cleanupBlockHeader.
   715  		gcCleanups.free.push(&b.lfnode)
   716  	}
   717  }
   718  
   719  // blockUntilEmpty blocks until either the cleanup queue is emptied
   720  // and the cleanups have been executed, or the timeout is reached.
   721  // Returns true if the cleanup queue was emptied.
   722  // This is used by the sync and unique tests.
   723  func (q *cleanupQueue) blockUntilEmpty(timeout int64) bool {
   724  	start := nanotime()
   725  	for nanotime()-start < timeout {
   726  		lock(&q.lock)
   727  		// The queue is empty when there's no work left to do *and* all the cleanup goroutines
   728  		// are asleep. If they're not asleep, they may be actively working on a block.
   729  		if q.flushed.Load() && q.full.empty() && uint32(q.sleeping.size) == q.ng.Load() {
   730  			unlock(&q.lock)
   731  			return true
   732  		}
   733  		unlock(&q.lock)
   734  		Gosched()
   735  	}
   736  	return false
   737  }
   738  
   739  //go:linkname unique_runtime_blockUntilEmptyCleanupQueue unique.runtime_blockUntilEmptyCleanupQueue
   740  func unique_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
   741  	return gcCleanups.blockUntilEmpty(timeout)
   742  }
   743  
   744  //go:linkname sync_test_runtime_blockUntilEmptyCleanupQueue sync_test.runtime_blockUntilEmptyCleanupQueue
   745  func sync_test_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
   746  	return gcCleanups.blockUntilEmpty(timeout)
   747  }
   748  
   749  // raceEnterNewCtx creates a new racectx and switches the current
   750  // goroutine to it. Returns the old racectx.
   751  //
   752  // Must be running on a user goroutine. nosplit to match other race
   753  // instrumentation.
   754  //
   755  //go:nosplit
   756  func raceEnterNewCtx() uintptr {
   757  	// We use the existing ctx as the spawn context, but gp.gopc
   758  	// as the spawn PC to make the error output a little nicer
   759  	// (pointing to AddCleanup, where the goroutines are created).
   760  	//
   761  	// We also need to carefully indicate to the race detector
   762  	// that the goroutine stack will only be accessed by the new
   763  	// race context, to avoid false positives on stack locations.
   764  	// We do this by marking the stack as free in the first context
   765  	// and then re-marking it as allocated in the second. Crucially,
   766  	// there must be (1) no race operations and (2) no stack changes
   767  	// in between. (1) is easy to avoid because we're in the runtime
   768  	// so there's no implicit race instrumentation. To avoid (2) we
   769  	// defensively become non-preemptible so the GC can't stop us,
   770  // and rely on the fact that racemalloc, racefree, and racectxstart
   771  	// are nosplit.
   772  	mp := acquirem()
   773  	gp := getg()
   774  	ctx := getg().racectx
   775  	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   776  	getg().racectx = racectxstart(gp.gopc, ctx)
   777  	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   778  	releasem(mp)
   779  	return ctx
   780  }
   781  
   782  // raceRestoreCtx restores ctx on the goroutine. It is the inverse of
   783  // raceEnterNewCtx and must be called with its result.
   784  //
   785  // Must be running on a user goroutine. nosplit to match other race
   786  // instrumentation.
   787  //
   788  //go:nosplit
   789  func raceRestoreCtx(ctx uintptr) {
   790  	mp := acquirem()
   791  	gp := getg()
   792  	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   793  	racectxend(getg().racectx)
   794  	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
   795  	getg().racectx = ctx
   796  	releasem(mp)
   797  }
   798  
