Source file src/runtime/debug.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// GOMAXPROCS sets the maximum number of CPUs that can be executing
// simultaneously and returns the previous setting. If n < 1, it does not change
// the current setting.
//
// If the GOMAXPROCS environment variable is set to a positive whole number,
// GOMAXPROCS defaults to that value.
//
// Otherwise, the Go runtime selects an appropriate default value based on the
// number of logical CPUs on the machine, the process’s CPU affinity mask, and,
// on Linux, the process’s average CPU throughput limit based on cgroup CPU
// quota, if any.
//
// The Go runtime periodically updates the default value based on changes to
// the total logical CPU count, the CPU affinity mask, or cgroup quota. Setting
// a custom value with the GOMAXPROCS environment variable or by calling
// GOMAXPROCS disables automatic updates. The default value and automatic
// updates can be restored by calling [SetDefaultGOMAXPROCS].
//
// If GODEBUG=containermaxprocs=0 is set, GOMAXPROCS defaults to the value of
// [runtime.NumCPU]. If GODEBUG=updatemaxprocs=0 is set, the Go runtime does
// not perform automatic GOMAXPROCS updating.
//
// The default GOMAXPROCS behavior may change as the scheduler improves.
func GOMAXPROCS(n int) int {
	if GOARCH == "wasm" && n > 1 {
		n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
	}

	lock(&sched.lock)
	ret := int(gomaxprocs)
	if n <= 0 {
		unlock(&sched.lock)
		return ret
	}
	// Set early so we can wait for sysmon before STW. See comment on
	// computeMaxProcsLock.
	sched.customGOMAXPROCS = true
	unlock(&sched.lock)

	// Wait for sysmon to complete running defaultGOMAXPROCS.
	lock(&computeMaxProcsLock)
	unlock(&computeMaxProcsLock)

	if n == ret {
		// sched.customGOMAXPROCS is set, but there is no need to actually STW
		// since gomaxprocs itself isn't changing.
		return ret
	}

	stw := stopTheWorldGC(stwGOMAXPROCS)

	// newprocs will be processed by startTheWorld
	//
	// TODO(prattmic): this could use a nicer API. Perhaps add it to the
	// stw parameter?
	newprocs = int32(n)

	startTheWorldGC(stw)
	return ret
}
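
// A minimal usage sketch from client code (illustrative only; doWork is a
// hypothetical helper): temporarily cap parallelism, then restore the previous
// value. Note that restoring the old value still counts as a custom setting,
// so automatic updates stay disabled until [SetDefaultGOMAXPROCS] is called:
//
//	prev := runtime.GOMAXPROCS(2) // cap at 2; returns the previous setting
//	doWork()
//	runtime.GOMAXPROCS(prev) // restore the previous numeric setting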

// SetDefaultGOMAXPROCS updates the GOMAXPROCS setting to the runtime
// default, as described by [GOMAXPROCS], ignoring the GOMAXPROCS
// environment variable.
//
// SetDefaultGOMAXPROCS can be used to enable the default automatic updating
// GOMAXPROCS behavior if it has been disabled by the GOMAXPROCS
// environment variable or a prior call to [GOMAXPROCS], or to force an immediate
// update if the caller is aware of a change to the total logical CPU count, CPU
// affinity mask or cgroup quota.
func SetDefaultGOMAXPROCS() {
	// SetDefaultGOMAXPROCS conceptually means "[re]do what the runtime
	// would do at startup if the GOMAXPROCS environment variable were
	// unset." It still respects GODEBUG.

	procs := defaultGOMAXPROCS(0)

	lock(&sched.lock)
	curr := gomaxprocs
	custom := sched.customGOMAXPROCS
	unlock(&sched.lock)

	if !custom && procs == curr {
		// Nothing to do if we're already using automatic GOMAXPROCS
		// and the limit is unchanged.
		return
	}

	stw := stopTheWorldGC(stwGOMAXPROCS)

	// newprocs will be processed by startTheWorld
	//
	// TODO(prattmic): this could use a nicer API. Perhaps add it to the
	// stw parameter?
	newprocs = procs
	lock(&sched.lock)
	sched.customGOMAXPROCS = false
	unlock(&sched.lock)

	startTheWorldGC(stw)
}
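
// A sketch of the intended pairing with [GOMAXPROCS] (illustrative only):
// override the setting for one phase of the program, then return to the
// automatically updated default rather than to the old numeric value:
//
//	runtime.GOMAXPROCS(1)          // pin to a single P for this phase
//	// ... single-threaded phase ...
//	runtime.SetDefaultGOMAXPROCS() // recompute the default and re-enable automatic updates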

// NumCPU returns the number of logical CPUs usable by the current process.
//
// The set of available CPUs is checked by querying the operating system
// at process startup. Changes to operating system CPU allocation after
// process startup are not reflected.
func NumCPU() int {
	return int(numCPUStartup)
}
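
// Because NumCPU reports a startup snapshot, it can differ from the current
// GOMAXPROCS default once affinity or cgroup quota changes have been picked
// up. A small sketch contrasting the two (illustrative only; assumes the usual
// fmt import):
//
//	fmt.Println(runtime.NumCPU())      // logical CPUs usable at process startup
//	fmt.Println(runtime.GOMAXPROCS(0)) // current setting; a non-positive n only queries it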

// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
	var n = int64(atomic.Load64(&ncgocall))
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		n += int64(mp.ncgocall)
	}
	return n
}
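
// A sketch of measuring the cgo traffic of a particular operation
// (illustrative only; callIntoC is a hypothetical cgo-backed helper, and the
// delta is only meaningful if no other goroutine is making cgo calls
// concurrently):
//
//	before := runtime.NumCgoCall()
//	callIntoC()
//	fmt.Printf("operation made %d cgo calls\n", runtime.NumCgoCall()-before)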

// totalMutexWaitTimeNanos returns the cumulative time, in nanoseconds, spent
// blocked on mutexes: the sync package wait time accumulated in
// sched.totalMutexWaitTime plus the runtime-internal lock wait time tracked
// on sched and on each M's lock profile.
func totalMutexWaitTimeNanos() int64 {
	total := sched.totalMutexWaitTime.Load()

	total += sched.totalRuntimeLockWaitTime.Load()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		total += mp.mLockProfile.waitTime.Load()
	}

	return total
}
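
// This total is surfaced to user code (converted to seconds) as the
// /sync/mutex/wait/total:seconds metric. A sketch of reading it via the
// runtime/metrics package (illustrative only; assumes fmt and runtime/metrics
// are imported):
//
//	s := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}}
//	metrics.Read(s)
//	fmt.Println("total mutex wait:", s[0].Value.Float64(), "seconds")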

// NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int {
	return int(gcount())
}

//go:linkname debug_modinfo runtime/debug.modinfo
func debug_modinfo() string {
	return modinfo
}

// mayMoreStackPreempt is a maymorestack hook that forces a preemption
// at every possible cooperative preemption point.
//
// This is valuable to apply to the runtime, which can be sensitive to
// preemption points. To apply this to all preemption points in the
// runtime and runtime-like code, use the following in bash or zsh:
//
//	X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
//
// This must be deeply nosplit because it is called from a function
// prologue before the stack is set up and because the compiler will
// call it from any splittable prologue (leading to infinite
// recursion).
//
// Ideally it should also use very little stack because the linker
// doesn't currently account for this in nosplit stack depth checking.
//
// Ensure mayMoreStackPreempt can be called for all ABIs.
//
//go:nosplit
//go:linkname mayMoreStackPreempt
func mayMoreStackPreempt() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force a preemption, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackPreempt
	}
}

// mayMoreStackMove is a maymorestack hook that forces stack movement
// at every possible point.
//
// See mayMoreStackPreempt.
//
//go:nosplit
//go:linkname mayMoreStackMove
func mayMoreStackMove() {
	// Don't do anything on the g0 or gsignal stack.
	gp := getg()
	if gp == gp.m.g0 || gp == gp.m.gsignal {
		return
	}
	// Force stack movement, unless the stack is already poisoned.
	if gp.stackguard0 < stackPoisonMin {
		gp.stackguard0 = stackForceMove
	}
}

// debugPinnerKeepUnpin is used to make runtime.(*Pinner).Unpin reachable.
var debugPinnerKeepUnpin bool = false

// debugPinnerV1 returns a new Pinner that pins itself. This function can be
// used by debuggers to easily obtain a Pinner that will not be garbage
// collected (or moved in memory) even if no references to it exist in the
// target program. This pinner in turn can be used to extend this property
// to other objects, which debuggers can use to simplify the evaluation of
// expressions involving multiple call injections.
func debugPinnerV1() *Pinner {
	p := new(Pinner)
	p.Pin(unsafe.Pointer(p))
	if debugPinnerKeepUnpin {
		// Make Unpin reachable.
		p.Unpin()
	}
	return p
}
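
// For comparison, the same pinning pattern in ordinary (non-debugger) Go code
// uses the public runtime.Pinner API directly (illustrative only; obj stands
// for any pointer whose referent must not be moved or collected while pinned):
//
//	var p runtime.Pinner
//	p.Pin(obj)
//	defer p.Unpin()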