Source file src/runtime/mgcwork.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_WorkbufSize = 2048 // in bytes; larger values result in less contention

	// workbufAlloc is the number of bytes to allocate at a time
	// for new workbufs. This must be a multiple of pageSize and
	// should be a multiple of _WorkbufSize.
	//
	// Larger values reduce workbuf allocation overhead. Smaller
	// values reduce heap fragmentation.
	workbufAlloc = 32 << 10
)

func init() {
	if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
		throw("bad workbufAlloc")
	}
}
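
// To make the constants above concrete (using the values as written):
// workbufAlloc/_WorkbufSize = (32<<10)/2048 = 16, so each chunk of workbuf
// memory is carved into 16 whole buffers, and with the usual 8 KiB runtime
// page size workbufAlloc covers exactly 4 pages. The init check above is
// what guarantees both divisions come out even.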

// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
// objects. A grey object is one that is marked and on a work
// queue. A black object is marked and not on a work queue.
//
// Write barriers, root discovery, stack scanning, and object scanning
// produce pointers to grey objects. Scanning consumes pointers to
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.

// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
// A gcWork can be used on the stack as follows:
//
//	(preemption must be disabled)
//	gcw := &getg().m.p.ptr().gcw
//	.. call gcw.put() to produce and gcw.tryGet() to consume ..
//
// It's important that any use of gcWork during the mark phase prevent
// the garbage collector from transitioning to mark termination since
// gcWork may locally hold GC work buffers. This can be done by
// disabling preemption (systemstack or acquirem).
type gcWork struct {
	// wbuf1 and wbuf2 are the primary and secondary work buffers.
	//
	// This can be thought of as a stack of both work buffers'
	// pointers concatenated. When we pop the last pointer, we
	// shift the stack up by one work buffer by bringing in a new
	// full buffer and discarding an empty one. When we fill both
	// buffers, we shift the stack down by one work buffer by
	// bringing in a new empty buffer and discarding a full one.
	// This way we have one buffer's worth of hysteresis, which
	// amortizes the cost of getting or putting a work buffer over
	// at least one buffer of work and reduces contention on the
	// global work lists.
	//
	// wbuf1 is always the buffer we're currently pushing to and
	// popping from and wbuf2 is the buffer that will be discarded
	// next.
	//
	// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
	wbuf1, wbuf2 *workbuf

	// Bytes marked (blackened) on this gcWork. This is aggregated
	// into work.bytesMarked by dispose.
	bytesMarked uint64

	// Heap scan work performed on this gcWork. This is aggregated into
	// gcController by dispose and may also be flushed by callers.
	// Other types of scan work are flushed immediately.
	heapScanWork int64

	// flushedWork indicates that a non-empty work buffer was
	// flushed to the global work list since the last gcMarkDone
	// termination check. Specifically, this indicates that this
	// gcWork may have communicated work to another gcWork.
	flushedWork bool
}
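
// A rough sketch of the hysteresis described on wbuf1/wbuf2 (illustrative
// flow, not a verbatim trace of the methods below):
//
//	put:    wbuf1 full?  -> swap wbuf1, wbuf2; still full?  -> putfull(old), wbuf1 = getempty()
//	tryGet: wbuf1 empty? -> swap wbuf1, wbuf2; still empty? -> putempty(old), wbuf1 = trygetfull()
//
// so each touch of the global lists is amortized over roughly a full
// buffer's worth of local work.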

// Most of the methods of gcWork are go:nowritebarrierrec because the
// write barrier itself can invoke gcWork methods but the methods are
// not generally re-entrant. Hence, if a gcWork method invoked the
// write barrier while the gcWork was in an inconsistent state, and
// the write barrier in turn invoked a gcWork method, it could
// permanently corrupt the gcWork.

func (w *gcWork) init() {
	w.wbuf1 = getempty()
	wbuf2 := trygetfull()
	if wbuf2 == nil {
		wbuf2 = getempty()
	}
	w.wbuf2 = wbuf2
}

// put enqueues a pointer for the garbage collector to trace.
// obj must point to the beginning of a heap object or an oblet.
//
//go:nowritebarrierrec
func (w *gcWork) put(obj uintptr) {
	flushed := false
	wbuf := w.wbuf1
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	} else if wbuf.nobj == len(wbuf.obj) {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			wbuf = getempty()
			w.wbuf1 = wbuf
			flushed = true
		}
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++

	// If we put a buffer on full, let the GC controller know so
	// it can encourage more workers to run. We delay this until
	// the end of put so that w is in a consistent state, since
	// enlistWorker may itself manipulate w.
	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
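
// Producers normally try the fast path first and only fall back to put when
// the local buffer must be flushed or (re)initialized. A hedged sketch of
// that pattern, assuming obj already points at a valid heap object:
//
//	gcw := &getg().m.p.ptr().gcw // preemption must be disabled
//	if !gcw.putFast(obj) {
//		gcw.put(obj)
//	}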

// putFast does a put and reports whether it can be done quickly;
// otherwise it returns false and the caller needs to call put.
//
//go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == len(wbuf.obj) {
		return false
	}

	wbuf.obj[wbuf.nobj] = obj
	wbuf.nobj++
	return true
}

// putBatch performs a put on every pointer in obj. See put for
// constraints on these pointers.
//
//go:nowritebarrierrec
func (w *gcWork) putBatch(obj []uintptr) {
	if len(obj) == 0 {
		return
	}

	flushed := false
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
	}

	for len(obj) > 0 {
		for wbuf.nobj == len(wbuf.obj) {
			putfull(wbuf)
			w.flushedWork = true
			w.wbuf1, w.wbuf2 = w.wbuf2, getempty()
			wbuf = w.wbuf1
			flushed = true
		}
		n := copy(wbuf.obj[wbuf.nobj:], obj)
		wbuf.nobj += n
		obj = obj[n:]
	}

	if flushed && gcphase == _GCmark {
		gcController.enlistWorker()
	}
}
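
// putBatch is the bulk analogue of put: it drains a caller-local slice of
// pointers in one call, flushing full buffers as it goes. A hedged sketch,
// where ptrs and n are hypothetical caller-side names:
//
//	gcw := &getg().m.p.ptr().gcw // preemption must be disabled
//	gcw.putBatch(ptrs[:n])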

// tryGet dequeues a pointer for the garbage collector to trace.
//
// If there are no pointers remaining in this gcWork or in the global
// queue, tryGet returns 0.  Note that there may still be pointers in
// other gcWork instances or other caches.
//
//go:nowritebarrierrec
func (w *gcWork) tryGet() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil {
		w.init()
		wbuf = w.wbuf1
		// wbuf is empty at this point.
	}
	if wbuf.nobj == 0 {
		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
		wbuf = w.wbuf1
		if wbuf.nobj == 0 {
			owbuf := wbuf
			wbuf = trygetfull()
			if wbuf == nil {
				return 0
			}
			putempty(owbuf)
			w.wbuf1 = wbuf
		}
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}

// tryGetFast dequeues a pointer for the garbage collector to trace
// if one is readily available. Otherwise it returns 0 and
// the caller is expected to call tryGet().
//
//go:nowritebarrierrec
func (w *gcWork) tryGetFast() uintptr {
	wbuf := w.wbuf1
	if wbuf == nil || wbuf.nobj == 0 {
		return 0
	}

	wbuf.nobj--
	return wbuf.obj[wbuf.nobj]
}
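
// The matching consumer loop pairs tryGetFast with tryGet and stops once 0
// comes back, meaning neither this gcWork nor the global queue has work. A
// hedged sketch, where scan is a hypothetical callback:
//
//	for {
//		obj := gcw.tryGetFast()
//		if obj == 0 {
//			if obj = gcw.tryGet(); obj == 0 {
//				break
//			}
//		}
//		scan(obj)
//	}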

// dispose returns any cached pointers to the global queue.
// The buffers are being put on the full queue so that the
// write barriers will not simply reacquire them before the
// GC can inspect them. This helps reduce the mutator's
// ability to hide pointers during the concurrent mark phase.
//
//go:nowritebarrierrec
func (w *gcWork) dispose() {
	if wbuf := w.wbuf1; wbuf != nil {
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf1 = nil

		wbuf = w.wbuf2
		if wbuf.nobj == 0 {
			putempty(wbuf)
		} else {
			putfull(wbuf)
			w.flushedWork = true
		}
		w.wbuf2 = nil
	}
	if w.bytesMarked != 0 {
		// dispose happens relatively infrequently. If this
		// atomic becomes a problem, we should first try to
		// dispose less and if necessary aggregate in a per-P
		// counter.
		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		w.bytesMarked = 0
	}
	if w.heapScanWork != 0 {
		gcController.heapScanWork.Add(w.heapScanWork)
		w.heapScanWork = 0
	}
}
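
// After dispose returns, wbuf1 and wbuf2 are nil and the byte/scan counters
// are zero, so the next put or tryGet will lazily re-init the gcWork. A
// minimal hedged sketch for a stack-allocated gcWork:
//
//	var gcw gcWork
//	.. gcw.put(...) / gcw.tryGet() with preemption disabled ..
//	gcw.dispose() // flush buffers, credit bytesMarked and heapScanWork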

// balance moves some work that's cached in this gcWork back on the
// global queue.
//
//go:nowritebarrierrec
func (w *gcWork) balance() {
	if w.wbuf1 == nil {
		return
	}
	if wbuf := w.wbuf2; wbuf.nobj != 0 {
		putfull(wbuf)
		w.flushedWork = true
		w.wbuf2 = getempty()
	} else if wbuf := w.wbuf1; wbuf.nobj > 4 {
		w.wbuf1 = handoff(wbuf)
		w.flushedWork = true // handoff did putfull
	} else {
		return
	}
	// We flushed a buffer to the full list, so wake a worker.
	if gcphase == _GCmark {
		gcController.enlistWorker()
	}
}

// empty reports whether w has no mark work available.
//
//go:nowritebarrierrec
func (w *gcWork) empty() bool {
	return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
}

// Internally, the GC work pool is kept in arrays in work buffers.
// The gcWork interface caches a work buffer until full (or empty) to
// avoid contending on the global work buffer lists.

type workbufhdr struct {
	node lfnode // must be first
	nobj int
}

type workbuf struct {
	_ sys.NotInHeap
	workbufhdr
	// account for the above fields
	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
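
// For a sense of capacity (hedged, since the header size is platform
// dependent): on a 64-bit platform with a two-word lfnode, workbufhdr is
// 24 bytes, so the obj array holds (2048-24)/8 = 253 pointers per workbuf.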

// workbuf factory routines. These funcs are used to manage the
// workbufs.
// If the GC asks for some work these are the only routines that
// make wbufs available to the GC.

func (b *workbuf) checknonempty() {
	if b.nobj == 0 {
		throw("workbuf is empty")
	}
}

func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		throw("workbuf is not empty")
	}
}

// getempty pops an empty work buffer off the work.empty list,
// allocating new buffers if none are available.
//
//go:nowritebarrier
func getempty() *workbuf {
	var b *workbuf
	if work.empty != 0 {
		b = (*workbuf)(work.empty.pop())
		if b != nil {
			b.checkempty()
		}
	}
	// Record that this may acquire the wbufSpans or heap lock to
	// allocate a workbuf.
	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if b == nil {
		// Allocate more workbufs.
		var s *mspan
		if work.wbufSpans.free.first != nil {
			lock(&work.wbufSpans.lock)
			s = work.wbufSpans.free.first
			if s != nil {
				work.wbufSpans.free.remove(s)
				work.wbufSpans.busy.insert(s)
			}
			unlock(&work.wbufSpans.lock)
		}
		if s == nil {
			systemstack(func() {
				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
			})
			if s == nil {
				throw("out of memory")
			}
			// Record the new span in the busy list.
			lock(&work.wbufSpans.lock)
			work.wbufSpans.busy.insert(s)
			unlock(&work.wbufSpans.lock)
		}
		// Slice up the span into new workbufs. Return one and
		// put the rest on the empty list.
		for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
			newb := (*workbuf)(unsafe.Pointer(s.base() + i))
			newb.nobj = 0
			lfnodeValidate(&newb.node)
			if i == 0 {
				b = newb
			} else {
				putempty(newb)
			}
		}
	}
	return b
}
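
// A worked example of the allocation path above, using the constants in this
// file: a fresh span is workbufAlloc = 32 KiB, which the loop slices into
// workbufAlloc/_WorkbufSize = 16 workbufs; the buffer at offset 0 is returned
// to the caller and the remaining 15 are pushed onto work.empty.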

// putempty puts a workbuf onto the work.empty list.
// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//
//go:nowritebarrier
func putempty(b *workbuf) {
	b.checkempty()
	work.empty.push(&b.node)
}

// putfull puts the workbuf on the work.full list for the GC.
// putfull accepts partially full buffers so the GC can avoid competing
// with the mutators for ownership of partially full buffers.
//
//go:nowritebarrier
func putfull(b *workbuf) {
	b.checknonempty()
	work.full.push(&b.node)
}
// trygetfull tries to get a full or partially filled workbuf.
// If one is not immediately available, it returns nil.
//
//go:nowritebarrier
func trygetfull() *workbuf {
	b := (*workbuf)(work.full.pop())
	if b != nil {
		b.checknonempty()
		return b
	}
	return b
}

//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
	// Make new buffer with half of b's pointers.
	b1 := getempty()
	n := b.nobj / 2
	b.nobj -= n
	b1.nobj = n
	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))

	// Put b on full list - let first half of b get stolen.
	putfull(b)
	return b1
}
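
// Example of the split (hedged arithmetic): with b.nobj = 9, n = 4, so the
// caller keeps b1 holding the last 4 pointers while b retains the first 5
// and is published on the full list for other workers to steal. balance
// relies on this when wbuf1 holds more than 4 objects.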

// prepareFreeWorkbufs moves busy workbuf spans to free list so they
// can be freed to the heap. This must only be called when all
// workbufs are on the empty list.
func prepareFreeWorkbufs() {
	lock(&work.wbufSpans.lock)
	if work.full != 0 {
		throw("cannot free workbufs when work.full != 0")
	}
	// Since all workbufs are on the empty list, we don't care
	// which ones are in which spans. We can wipe the entire empty
	// list and move all workbuf spans to the free list.
	work.empty = 0
	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
	unlock(&work.wbufSpans.lock)
}

// freeSomeWbufs frees some workbufs back to the heap and returns
// true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool {
	const batchSize = 64 // ~1–2 µs per span.
	lock(&work.wbufSpans.lock)
	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		unlock(&work.wbufSpans.lock)
		return false
	}
	systemstack(func() {
		gp := getg().m.curg
		for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
			span := work.wbufSpans.free.first
			if span == nil {
				break
			}
			work.wbufSpans.free.remove(span)
			mheap_.freeManual(span, spanAllocWorkBuf)
		}
	})
	more := !work.wbufSpans.free.isEmpty()
	unlock(&work.wbufSpans.lock)
	return more
}
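
// freeSomeWbufs is meant to be called in a loop so that freeing large
// numbers of spans stays preemptible between batches. A hedged sketch of
// such a caller:
//
//	for freeSomeWbufs(true) {
//		Gosched() // yield between batches
//	}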