Source file src/runtime/preempt_xreg.go

     1  // Copyright 2025 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build amd64
     6  
     7  // This provides common support for architectures that use extended register
     8  // state in asynchronous preemption.
     9  //
    10  // While asynchronous preemption stores general-purpose (GP) registers on the
    11  // preempted goroutine's own stack, extended register state can be used to save
    12  // non-GP state off the stack. In particular, this is meant for large vector
    13  // register files. Currently, we assume this contains only scalar data, though
    14  // we could change this constraint by conservatively scanning this memory.
    15  //
    16  // For an architecture to support extended register state, it must provide a Go
    17  // definition of an xRegState type for storing the state, and its asyncPreempt
    18  // implementation must write this register state to p.xRegs.scratch.
    19  
    20  package runtime
    21  
    22  import (
    23  	"internal/runtime/sys"
    24  	"unsafe"
    25  )
    26  
// xRegState is long-lived extended register state. It is allocated off-heap and
// manually managed, so it must not contain heap pointers.
type xRegState struct {
	_    sys.NotInHeap // Allocated from xRegAlloc, never from the GC'd heap.
	regs xRegs         // Architecture-defined extended (non-GP) register contents.
}
    33  
// xRegPerG stores extended register state while a goroutine is asynchronously
// preempted. This is nil otherwise, so we can reuse a (likely small) pool of
// xRegState objects.
type xRegPerG struct {
	// state holds the goroutine's saved extended registers while it is
	// asynchronously preempted, and is nil at all other times.
	state *xRegState
}
    40  
// xRegPerP is the per-P state used to shuttle extended registers between
// asyncPreempt (assembly) and the per-G save block.
type xRegPerP struct {
	// scratch temporary per-P space where [asyncPreempt] saves the register
	// state before entering Go. It's quickly copied to per-G state.
	scratch xRegs

	// cache is a 1-element allocation cache of extended register state used by
	// asynchronous preemption. On entry to preemption, this is used as a simple
	// allocation cache. On exit from preemption, the G's xRegState is always
	// stored here where it can be restored, and later either freed or reused
	// for another preemption. On exit, this serves the dual purpose of
	// delay-freeing the allocated xRegState until after we've definitely
	// restored it.
	cache *xRegState
}
    55  
// xRegAlloc allocates xRegState objects.
var xRegAlloc struct {
	lock  mutex    // Protects alloc; initialized with rank lockRankXRegAlloc.
	alloc fixalloc // Fixed-size, off-heap allocator for xRegState blocks.
}
    61  
// xRegInitAlloc initializes the xRegState allocator. It must run before the
// first xRegSave that needs to allocate a fresh save block.
func xRegInitAlloc() {
	lockInit(&xRegAlloc.lock, lockRankXRegAlloc)
	// Size the fixalloc for whole xRegState blocks; memory is accounted
	// against memstats.other_sys.
	xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys)
}
    66  
    67  // xRegSave saves the extended register state on this P to gp.
    68  //
    69  // This must run on the system stack because it assumes the P won't change.
    70  //
    71  //go:systemstack
    72  func xRegSave(gp *g) {
    73  	if gp.xRegs.state != nil {
    74  		// Double preempt?
    75  		throw("gp.xRegState.p != nil on async preempt")
    76  	}
    77  
    78  	// Get the place to save the register state.
    79  	var dest *xRegState
    80  	pp := gp.m.p.ptr()
    81  	if pp.xRegs.cache != nil {
    82  		// Use the cached allocation.
    83  		dest = pp.xRegs.cache
    84  		pp.xRegs.cache = nil
    85  	} else {
    86  		// Allocate a new save block.
    87  		lock(&xRegAlloc.lock)
    88  		dest = (*xRegState)(xRegAlloc.alloc.alloc())
    89  		unlock(&xRegAlloc.lock)
    90  	}
    91  
    92  	// Copy state saved in the scratchpad to dest.
    93  	//
    94  	// If we ever need to save less state (e.g., avoid saving vector registers
    95  	// that aren't in use), we could have multiple allocation pools for
    96  	// different size states and copy only the registers we need.
    97  	dest.regs = pp.xRegs.scratch
    98  
    99  	// Save on the G.
   100  	gp.xRegs.state = dest
   101  }
   102  
   103  // xRegRestore prepares the extended register state on gp to be restored.
   104  //
   105  // It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find
   106  // it. This means nothing else may use the cache between this call and the
   107  // return to asyncPreempt. This is not quite symmetric with [xRegSave], which
   108  // uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy.
   109  //
   110  // This is called with asyncPreempt on the stack and thus must not grow the
   111  // stack.
   112  //
   113  //go:nosplit
   114  func xRegRestore(gp *g) {
   115  	if gp.xRegs.state == nil {
   116  		throw("gp.xRegState.p == nil on return from async preempt")
   117  	}
   118  	// If the P has a block cached on it, free that so we can replace it.
   119  	pp := gp.m.p.ptr()
   120  	if pp.xRegs.cache != nil {
   121  		// Don't grow the G stack.
   122  		systemstack(func() {
   123  			pp.xRegs.free()
   124  		})
   125  	}
   126  	pp.xRegs.cache = gp.xRegs.state
   127  	gp.xRegs.state = nil
   128  }
   129  
   130  func (xRegs *xRegPerP) free() {
   131  	if xRegs.cache != nil {
   132  		lock(&xRegAlloc.lock)
   133  		xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache))
   134  		xRegs.cache = nil
   135  		unlock(&xRegAlloc.lock)
   136  	}
   137  }
   138  

View as plain text