Source file src/internal/runtime/gc/scan/scan_go.go

// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scan

import (
	"internal/goarch"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

// ScanSpanPackedGo is an optimized pure Go implementation of ScanSpanPacked.
func ScanSpanPackedGo(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
	buf := newUnsafeBuf(bufp)
	objBytes := uintptr(gc.SizeClassToSize[sizeClass])
	// TODO(austin): Trim objMarks to the number of objects in this size class?
	for markI, markWord := range objMarks {
		for range sys.OnesCount64(uint64(markWord)) {
			bitI := sys.TrailingZeros64(uint64(markWord))
			markWord &^= 1 << bitI

			objIndex := markI*goarch.PtrBits + bitI

			// objStartInSpan is the index of the word from mem where the
			// object starts. objEndInSpan points to the next object, i.e.
			// it's an exclusive upper bound.
			objStartInSpan := objBytes * uintptr(objIndex) / goarch.PtrSize
			objEndInSpan := objStartInSpan + objBytes/goarch.PtrSize

			// TODO: Another way to do this would be to extract the pointer mask
			// for this object (it's at most 64 bits) and do a bit iteration
			// over that.

			for wordI := objStartInSpan; wordI < objEndInSpan; wordI++ {
				val := *(*uintptr)(unsafe.Add(mem, wordI*goarch.PtrSize))
				// Check if we should enqueue this word.
				//
				// We load the word before the check because, even though this
				// can lead to loading much more than necessary, it's faster.
				// Most likely this is because it warms up the hardware
				// prefetcher much better, and gives us more time before we need
				// the value.
				//
				// We discard values that can't possibly be useful pointers
				// here, too, because this filters out a lot of words and does
				// so with as little processing as possible.
				//
				// TODO: This is close to, but not entirely branchless.
				isPtr := bool2int(ptrMask[wordI/goarch.PtrBits]&(1<<(wordI%goarch.PtrBits)) != 0)
				isNonNil := bool2int(val >= 4096)
				pred := isPtr&isNonNil != 0
				buf.addIf(val, pred)
			}
		}
	}
	// We don't know the true size of bufp, but we can at least catch obvious errors
	// in this function by making sure we didn't write more than gc.PageWords pointers
	// into the buffer.
	buf.check(gc.PageWords)
	return int32(buf.n)
}
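
// The two helpers below are illustrative, hypothetical sketches that
// restate techniques used in ScanSpanPackedGo in isolation; neither is
// part of the scanner itself.
//
// eachSetBit shows the mark-word iteration trick from the loop above:
// count the one bits once, then repeatedly find and clear the lowest
// set bit, so the loop runs exactly once per marked object with no
// per-bit test.
func eachSetBit(word uint64, visit func(bit int)) {
	for range sys.OnesCount64(word) {
		bit := sys.TrailingZeros64(word)
		word &^= 1 << bit // clear the lowest set bit
		visit(bit)
	}
}

// isLikelyPointer restates the word filter: a word is kept only if its
// bit is set in the span's pointer mask and its value is at least 4096,
// which discards nil and other values too small to be real pointers.
func isLikelyPointer(val uintptr, wordI uintptr, ptrMask *gc.PtrMask) bool {
	isPtr := bool2int(ptrMask[wordI/goarch.PtrBits]&(1<<(wordI%goarch.PtrBits)) != 0)
	isNonNil := bool2int(val >= 4096)
	return isPtr&isNonNil != 0
}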

// unsafeBuf allows for appending to a buffer without bounds-checks or branches.
type unsafeBuf[T any] struct {
	base *T
	n    int
}

func newUnsafeBuf[T any](base *T) unsafeBuf[T] {
	return unsafeBuf[T]{base, 0}
}

// addIf appends a value to the buffer if the predicate is true.
//
// addIf speculatively writes to the next index of the buffer, so the caller
// must be certain that such a write will still be in-bounds with respect
// to the buffer's true capacity.
func (b *unsafeBuf[T]) addIf(val T, pred bool) {
	*(*T)(unsafe.Add(unsafe.Pointer(b.base), b.n*int(unsafe.Sizeof(val)))) = val
	b.n += bool2int(pred)
}

// check performs a bounds check on speculative writes into the buffer.
// Calling it shortly after a series of addIf calls is important to catch
// any misuse as fast as possible. Checking every append individually would
// defeat the point of addIf, but a single check covering several appends
// stays cheap while remaining far more memory safe than no check at all.
func (b unsafeBuf[T]) check(cap int) {
	// We fail even if b.n == cap because addIf speculatively writes one past b.n.
	if b.n >= cap {
		panic("unsafeBuf overflow")
	}
}
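
// collectNonZero is an illustrative, hypothetical example of the
// intended addIf/check protocol: a branch-light run of appends
// followed by a single bounds check. check enforces buf.n < cap, so
// every write, including the speculative ones, landed below index cap.
func collectNonZero(dst *uintptr, src []uintptr, cap int) int {
	buf := newUnsafeBuf(dst)
	for _, v := range src {
		// Unconditional write, conditional advance.
		buf.addIf(v, v != 0)
	}
	buf.check(cap) // one bounds check amortized over len(src) appends
	return buf.n
}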

func bool2int(x bool) int {
	// This particular pattern gets optimized by the compiler.
	var b int
	if x {
		b = 1
	}
	return b
}
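
// countNonZero is an illustrative, hypothetical use of bool2int: since
// the pattern above is typically compiled to a branch-free conditional
// set, this loop body carries no data-dependent branch.
func countNonZero(xs []uintptr) int {
	n := 0
	for _, x := range xs {
		n += bool2int(x != 0)
	}
	return n
}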
