Source file src/runtime/mgcmark_nogreenteagc.go

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.greenteagc

package runtime

import (
	"internal/goarch"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

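// The declarations below provide the mark-phase hooks for builds
// without the greenteagc experiment: whole-span scanning is stubbed
// out, so objects are always scanned one at a time.

// markBitsForIndex returns the markBits for the object with the given
// index within s.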
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

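// markBitsForBase returns the markBits for the first object in s.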
func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

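// tryDeferToSpanScan attempts to queue the object at p to be scanned
// as part of a whole span. Span scanning requires the greenteagc
// experiment, so this stub always reports false and callers fall back
// to scanning the object individually.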
func tryDeferToSpanScan(p uintptr, gcw *gcWork) bool {
	return false
}

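// initInlineMarkBits is a no-op: inline mark bits exist only with the
// greenteagc experiment.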
func (s *mspan) initInlineMarkBits() {
}

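// moveInlineMarks must never be called without the greenteagc
// experiment; there are no inline mark bits to move.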
func (s *mspan) moveInlineMarks(to *gcBits) {
	throw("unimplemented")
}

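// gcUsesSpanInlineMarkBits reports whether a span uses inline mark
// bits. It is always false without the greenteagc experiment.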
func gcUsesSpanInlineMarkBits(_ uintptr) bool {
	return false
}

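// inlineMarkBits always returns nil: spans carry no inline mark bits
// without the greenteagc experiment.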
func (s *mspan) inlineMarkBits() *spanInlineMarkBits {
	return nil
}

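// scannedBitsForIndex must never be called without the greenteagc
// experiment. The unreachable return statement only satisfies the
// compiler, which does not know that throw never returns.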
func (s *mspan) scannedBitsForIndex(objIndex uintptr) markBits {
	throw("unimplemented")
	return markBits{}
}

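// spanInlineMarkBits is an empty placeholder; the real representation
// exists only with the greenteagc experiment.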
type spanInlineMarkBits struct {
}

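// tryAcquire always fails: there are no inline mark bits to acquire.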
func (q *spanInlineMarkBits) tryAcquire() bool {
	return false
}

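// spanQueue is an empty placeholder for the queue of spans awaiting
// scanning; it never holds work without the greenteagc experiment.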
type spanQueue struct {
}

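// flush is a no-op: the queue never holds any work.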
func (q *spanQueue) flush() {
}

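// empty always reports true: the queue never holds any work.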
func (q *spanQueue) empty() bool {
	return true
}

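// destroy is a no-op: there is nothing to tear down.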
func (q *spanQueue) destroy() {
}

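// spanSPMC is a stub of the single-producer, multi-consumer queue of
// spans used by the greenteagc experiment.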
type spanSPMC struct {
	_ sys.NotInHeap
	allnode listNodeManual
}

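// freeDeadSpanSPMCs is a no-op: no spanSPMCs are ever allocated
// without the greenteagc experiment.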
func freeDeadSpanSPMCs() {
}

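// objptr is an opaque reference to a heap object used by the
// span-scanning work APIs; it is never produced in this configuration.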
type objptr uintptr

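// tryGetSpanFast always fails: no span work is ever queued without
// the greenteagc experiment.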
func (w *gcWork) tryGetSpanFast() objptr {
	return 0
}

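// tryGetSpan always fails; see tryGetSpanFast.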
func (w *gcWork) tryGetSpan() objptr {
	return 0
}

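// tryStealSpan always fails: there is no span work to steal from
// other workers.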
func (w *gcWork) tryStealSpan() objptr {
	return 0
}

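// scanSpan must never be called without the greenteagc experiment,
// since no span work is ever queued.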
func scanSpan(p objptr, gcw *gcWork) {
	throw("unimplemented")
}

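// sizeClassScanStats counts, for one size class, the objects scanned
// individually ("sparsely") rather than as whole spans.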
type sizeClassScanStats struct {
	sparseObjsScanned uint64
}

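// dumpScanStats prints the scan statistics recorded for the last GC
// cycle. Size class 0, printed as class "L", covers large objects.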
func dumpScanStats() {
	var sparseObjsScanned uint64
	for _, stats := range memstats.lastScanStats {
		sparseObjsScanned += stats.sparseObjsScanned
	}
	print("scan: total ", sparseObjsScanned, " objs\n")
	for i, stats := range memstats.lastScanStats {
		if stats == (sizeClassScanStats{}) {
			continue
		}
		if i == 0 {
			print("scan: class L ")
		} else {
			print("scan: class ", gc.SizeClassToSize[i], "B ")
		}
		print(stats.sparseObjsScanned, " objs\n")
	}
}

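// flushScanStats adds this worker's per-size-class scan statistics
// into dst and resets the local counters.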
func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
	for i := range w.stats {
		dst[i].sparseObjsScanned += w.stats[i].sparseObjsScanned
	}
	clear(w.stats[:])
}

// gcMarkWorkAvailable reports whether there's any non-local work available to do.
func gcMarkWorkAvailable() bool {
	if !work.full.empty() {
		return true // global work available
	}
	if work.markrootNext.Load() < work.markrootJobs.Load() {
		return true // root scan work available
	}
	return false
}

// scanObject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanObject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanObject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanObject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanObject of a noscan object")
	}

	var tp typePointers
	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putObjFast(oblet) {
					gcw.putObj(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		n = min(n, maxObletBytes)
		tp = s.typePointersOfUnchecked(s.base())
		tp = tp.fastForward(b-tp.addr, b+n)
	} else {
		tp = s.typePointersOfUnchecked(b)
	}

	var scanSize uintptr
	for {
		var addr uintptr
		if tp, addr = tp.nextFast(); addr == 0 {
			if tp, addr = tp.next(b + n); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork. TODO: is there a better metric,
		// now that we can skip scalar portions pretty efficiently?
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if !tryDeferToSpanScan(obj, gcw) {
				if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
					greyobject(obj, b, addr-b, span, gcw, objIndex)
				}
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
	if debug.gctrace > 1 {
		gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
	}
}
