Source file src/runtime/debug.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. It defaults to
// the value of [runtime.NumCPU]. If n < 1, it does not change the current setting.
// This call will go away when the scheduler improves.
func GOMAXPROCS(n int) int {
	if GOARCH == "wasm" && n > 1 {
		n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
	}

	lock(&sched.lock)
	ret := int(gomaxprocs)
	unlock(&sched.lock)
	if n <= 0 || n == ret {
		return ret
	}

	stw := stopTheWorldGC(stwGOMAXPROCS)

	// newprocs will be processed by startTheWorld
	newprocs = int32(n)

	startTheWorldGC(stw)
	return ret
}
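
// Illustrative caller-side sketch (not part of this file; assumes the
// exported runtime API): temporarily capping parallelism and restoring
// the previous setting afterwards. The value 4 is an arbitrary example.
//
//	prev := runtime.GOMAXPROCS(4)  // cap at 4 CPUs; returns the old setting
//	defer runtime.GOMAXPROCS(prev) // restore the previous setting
//
//	// Passing n < 1 leaves the setting unchanged, so it works as a query.
//	cur := runtime.GOMAXPROCS(0)
//	_ = cur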

// NumCPU returns the number of logical CPUs usable by the current process.
//
// The set of available CPUs is checked by querying the operating system
// at process startup. Changes to operating system CPU allocation after
// process startup are not reflected.
func NumCPU() int {
	return int(ncpu)
}
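
// Illustrative caller-side sketch (not part of this file): sizing a worker
// pool to the number of usable CPUs reported by NumCPU.
//
//	workers := runtime.NumCPU()
//	jobs := make(chan int)
//	for i := 0; i < workers; i++ {
//		go func() {
//			for j := range jobs {
//				_ = j // process job j
//			}
//		}()
//	}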

// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
	var n = int64(atomic.Load64(&ncgocall))
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		n += int64(mp.ncgocall)
	}
	return n
}

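// totalMutexWaitTimeNanos returns the total time, in nanoseconds, spent
// waiting on mutexes: the scheduler-wide totals plus the per-M lock
// profile wait times accumulated across allm.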
func totalMutexWaitTimeNanos() int64 {
	total := sched.totalMutexWaitTime.Load()

	total += sched.totalRuntimeLockWaitTime.Load()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		total += mp.mLockProfile.waitTime.Load()
	}

	return total
}

// NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int {
	return int(gcount())
}
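
// Illustrative caller-side sketch (not part of this file): a crude goroutine
// leak check around a workload, e.g. in a test. runWorkload is hypothetical,
// and the sleep is only a rough way to let spawned goroutines exit.
//
//	before := runtime.NumGoroutine()
//	runWorkload()
//	time.Sleep(100 * time.Millisecond)
//	if after := runtime.NumGoroutine(); after > before {
//		log.Printf("possible goroutine leak: %d -> %d", before, after)
//	}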

//go:linkname debug_modinfo runtime/debug.modinfo
func debug_modinfo() string {
	return modinfo
}

// mayMoreStackPreempt is a maymorestack hook that forces a preemption
// at every possible cooperative preemption point.
//
// This is valuable to apply to the runtime, which can be sensitive to
// preemption points. To apply this to all preemption points in the
// runtime and runtime-like code, use the following in bash or zsh:
//
//	X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
//
// This must be deeply nosplit because it is called from a function
// prologue before the stack is set up and because the compiler will
// call it from any splittable prologue (leading to infinite
// recursion).
//
// Ideally it should also use very little stack because the linker
// doesn't currently account for this in nosplit stack depth checking.
//
// Ensure mayMoreStackPreempt can be called for all ABIs.
//
//go:nosplit
//go:linkname mayMoreStackPreempt
func mayMoreStackPreempt() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force a preemption, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackPreempt
	}
}

// mayMoreStackMove is a maymorestack hook that forces stack movement
// at every possible point.
//
// See mayMoreStackPreempt.
//
//go:nosplit
//go:linkname mayMoreStackMove
func mayMoreStackMove() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force stack movement, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackForceMove
	}
}

// debugPinnerKeepUnpin is used to make runtime.(*Pinner).Unpin reachable.
var debugPinnerKeepUnpin bool = false

// debugPinnerV1 returns a new Pinner that pins itself. This function can be
// used by debuggers to easily obtain a Pinner that will not be garbage
// collected (or moved in memory) even if no references to it exist in the
// target program. This pinner in turn can be used to extend this property
// to other objects, which debuggers can use to simplify the evaluation of
// expressions involving multiple call injections.
func debugPinnerV1() *Pinner {
	p := new(Pinner)
	p.Pin(unsafe.Pointer(p))
	if debugPinnerKeepUnpin {
		// Make Unpin reachable.
		p.Unpin()
	}
	return p
}