Source file src/runtime/vgetrandom_linux.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux && (amd64 || arm64 || arm64be || ppc64 || ppc64le || loong64 || s390x)

package runtime

import (
	"internal/cpu"
	"internal/goexperiment"
	"unsafe"
)

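// vgetrandom1 calls the vDSO getrandom() implementation with the given
// buffer, flags, and opaque per-thread state. It is implemented in
// per-architecture assembly.
//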
//go:noescape
func vgetrandom1(buf *byte, length uintptr, flags uint32, state uintptr, stateSize uintptr) int

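// vgetrandomAlloc is the pool of opaque vDSO getrandom states available to
// hand out to Ms, along with the allocation parameters reported by the vDSO.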
var vgetrandomAlloc struct {
	states     []uintptr
	statesLock mutex
	stateSize  uintptr
	mmapProt   int32
	mmapFlags  int32
}

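// vgetrandomInit queries the vDSO for the size of its opaque state and the
// mmap parameters to use when allocating states. If the getrandom symbol is
// missing or the query fails, stateSize remains 0 and vgetrandom reports
// itself unsupported.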
func vgetrandomInit() {
	if vdsoGetrandomSym == 0 {
		return
	}

	var params struct {
		SizeOfOpaqueState uint32
		MmapProt          uint32
		MmapFlags         uint32
		reserved          [13]uint32
	}
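	// Calling vgetrandom1 with a nil buffer, zero length, and a state size
	// of ^uintptr(0) asks the vDSO to fill in params rather than generate
	// random bytes.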
	if vgetrandom1(nil, 0, 0, uintptr(unsafe.Pointer(&params)), ^uintptr(0)) != 0 {
		return
	}
	vgetrandomAlloc.stateSize = uintptr(params.SizeOfOpaqueState)
	vgetrandomAlloc.mmapProt = int32(params.MmapProt)
	vgetrandomAlloc.mmapFlags = int32(params.MmapFlags)

	lockInit(&vgetrandomAlloc.statesLock, lockRankVgetrandom)
}

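// vgetrandomGetState returns an opaque state for the vDSO, taking one from
// the free list or carving a freshly mmap'd block into per-state slots when
// the list is empty. It returns 0 if allocation fails.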
func vgetrandomGetState() uintptr {
	lock(&vgetrandomAlloc.statesLock)
	if len(vgetrandomAlloc.states) == 0 {
		num := uintptr(numCPUStartup) // Just a reasonable size hint to start.
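		// Round each state up to a cache line to avoid false sharing, size
		// the mapping in whole pages, and recompute how many states actually
		// fit in that mapping.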
		stateSizeCacheAligned := (vgetrandomAlloc.stateSize + cpu.CacheLineSize - 1) &^ (cpu.CacheLineSize - 1)
		allocSize := (num*stateSizeCacheAligned + physPageSize - 1) &^ (physPageSize - 1)
		num = (physPageSize / stateSizeCacheAligned) * (allocSize / physPageSize)
		p, err := mmap(nil, allocSize, vgetrandomAlloc.mmapProt, vgetrandomAlloc.mmapFlags, -1, 0)
		if err != 0 {
			unlock(&vgetrandomAlloc.statesLock)
			return 0
		}
		setVMAName(p, allocSize, "getrandom states")
		newBlock := uintptr(p)
		if vgetrandomAlloc.states == nil {
			vgetrandomAlloc.states = make([]uintptr, 0, num)
		}
		for i := uintptr(0); i < num; i++ {
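			// A state must not straddle a page boundary; if it would, skip
			// ahead to the start of the next page.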
			if (newBlock&(physPageSize-1))+vgetrandomAlloc.stateSize > physPageSize {
				newBlock = (newBlock + physPageSize - 1) &^ (physPageSize - 1)
			}
			vgetrandomAlloc.states = append(vgetrandomAlloc.states, newBlock)
			newBlock += stateSizeCacheAligned
		}
	}
	state := vgetrandomAlloc.states[len(vgetrandomAlloc.states)-1]
	vgetrandomAlloc.states = vgetrandomAlloc.states[:len(vgetrandomAlloc.states)-1]
	unlock(&vgetrandomAlloc.statesLock)
	return state
}

// Free vgetrandom state from the M (if any) prior to destroying the M.
//
// This may allocate, so it must have a P.
func vgetrandomDestroy(mp *m) {
	if mp.vgetrandomState == 0 {
		return
	}

	lock(&vgetrandomAlloc.statesLock)
	vgetrandomAlloc.states = append(vgetrandomAlloc.states, mp.vgetrandomState)
	unlock(&vgetrandomAlloc.statesLock)
}

// This is exported for use in internal/syscall/unix as well as x/sys/unix.
//
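// ret is the value returned by the vDSO call: a byte count on success or a
// negative errno on failure. supported is false when the vDSO getrandom is
// unavailable, in which case the caller should fall back to the getrandom
// system call.
//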
//go:linkname vgetrandom
func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
	if vgetrandomAlloc.stateSize == 0 {
		return -1, false
	}

	// vDSO code may spill registers to the stack.
	// Make sure they're zeroed if we're running in secret mode.
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		secretEraseRegisters()
	}

	// We use getg().m instead of acquirem() here, because always taking
	// the lock is slightly more expensive than not always taking the lock.
	// However, we *do* require that m doesn't migrate elsewhere during the
	// execution of the vDSO. So, we exploit two details:
	//   1) Asynchronous preemption is aborted when PC is in the runtime.
	//   2) Most of the time, this function only calls vgetrandom1(), which
	//      does not have a preamble that synchronously preempts.
	// We do need to take the lock when getting a new state for m, but this
	// is very much the slow path, in the sense that it only ever happens
	// once over the entire lifetime of an m. So, a simple getg().m suffices.
	mp := getg().m

	if mp.vgetrandomState == 0 {
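		// Disable preemption so the goroutine is not migrated to another M
		// while it allocates a state and installs it on this M.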
		mp.locks++
		state := vgetrandomGetState()
		mp.locks--
		if state == 0 {
			return -1, false
		}
		mp.vgetrandomState = state
	}
	return vgetrandom1(unsafe.SliceData(p), uintptr(len(p)), flags, mp.vgetrandomState, vgetrandomAlloc.stateSize), true
}
