Source file src/runtime/runtime2.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/chacha8rand"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M (g.m is valid) and it usually has a P
	// (g.m.p is valid), but there are small windows of time where
	// it might not, namely upon entering and exiting _Gsyscall.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	// It may have a P attached, but it does not own it. Code
	// executing in this state must not touch g.m.p.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gleaked represents a leaked goroutine caught by the GC.
	_Gleaked // 10

	// _Gdeadextra is a _Gdead goroutine that's attached to an extra M
	// used for cgo callbacks.
	_Gdeadextra // 11

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	_Gscanleaked    = _Gscan + _Gleaked    // 0x100a
	_Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)

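// The following is an illustrative sketch, not part of the runtime: it
// shows how the _Gscan bit acts as a lock on the stack. Setting the bit
// via CAS takes ownership of a _Grunnable G's stack; storing the plain
// status back releases it. The real helpers are castogscanstatus and
// casfrom_Gscanstatus in proc.go; exampleScanRunnable is hypothetical.
func exampleScanRunnable(gp *g) bool {
	// _Grunnable -> _Gscanrunnable: on success the G cannot be
	// scheduled until the scan bit is cleared.
	if !gp.atomicstatus.CompareAndSwap(_Grunnable, _Gscanrunnable) {
		return false // status changed underneath us; caller would retry
	}
	// ... the stack may be scanned safely here ...
	// _Gscanrunnable -> _Grunnable: release stack ownership.
	gp.atomicstatus.Store(_Grunnable)
	return true
}
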
const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), or _Pgcstop (to halt for the GC). The M may also hand
	// ownership of the P off directly to another M (for example,
	// to schedule a locked G).
	_Prunning

	// _Psyscall_unused is a now-defunct state for a P. A P is
	// identified as "in a system call" by looking at the goroutine's
	// state.
	_Psyscall_unused

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

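// Illustrative sketch, not part of the runtime: a zeroed mutex is ready
// to use, so a global lock needs no initialization. lock and unlock are
// the runtime-internal entry points; exampleLock and
// exampleCriticalSection are hypothetical.
var exampleLock mutex

func exampleCriticalSection() {
	lock(&exampleLock)
	// ... uncontended acquisition costs a few user-level instructions;
	// under contention the M sleeps in the kernel ...
	unlock(&exampleLock)
}
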
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

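// Illustrative sketch, not part of the runtime: efaceOf gives zero-cost
// access to the runtime representation of an any value, exposing its
// type descriptor and data word. exampleTypeAndData is hypothetical.
func exampleTypeAndData(v any) (*_type, unsafe.Pointer) {
	e := efaceOf(&v)
	return e._type, e.data
}
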
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

//go:nosplit
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
//  1. Never hold an muintptr locally across a safe point.
//
//  2. Any muintptr in the heap must be owned by the M itself so it can
//     ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}

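// Illustrative sketch, not part of the runtime: storing a *g through a
// guintptr (or setGNoWB) compiles to a plain word write with no write
// barrier, which is what makes these types safe to update while the
// current M holds no P. examplePublish is hypothetical.
func examplePublish(slot *guintptr, gp *g) {
	slot.set(gp)   // plain store, no write barrier emitted
	_ = slot.ptr() // read back as a real *g
}
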
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}

// maybeTraceablePtr is a special pointer that is conditionally trackable
// by the GC. It consists of an address as a uintptr (vu) and a pointer
// to a data element (vp).
//
// maybeTraceablePtr values can be in one of three states:
//  1. Unset: vu == 0 && vp == nil
//  2. Untracked: vu != 0 && vp == nil
//  3. Tracked: vu != 0 && vp != nil
//
// Do not set fields manually. Use methods instead.
// Extend this type with additional methods if needed.
type maybeTraceablePtr struct {
	vp unsafe.Pointer // For liveness only.
	vu uintptr        // Source of truth.
}

// setUntraceable unsets the pointer but preserves the address.
// This is used to hide the pointer from the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setUntraceable() {
	p.vp = nil
}

// setTraceable resets the pointer to the stored address.
// This is used to make the pointer visible to the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setTraceable() {
	p.vp = unsafe.Pointer(p.vu)
}

// set sets the pointer to the data element and updates the address.
//
//go:nosplit
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
	p.vp = v
	p.vu = uintptr(v)
}

// get retrieves the pointer to the data element.
//
//go:nosplit
func (p *maybeTraceablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(p.vu)
}

// uintptr returns the uintptr address of the pointer.
//
//go:nosplit
func (p *maybeTraceablePtr) uintptr() uintptr {
	return p.vu
}

// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
// to track hchan pointers.
//
// Do not set fields manually. Use methods instead.
type maybeTraceableChan struct {
	maybeTraceablePtr
}

//go:nosplit
func (p *maybeTraceableChan) set(c *hchan) {
	p.maybeTraceablePtr.set(unsafe.Pointer(c))
}

//go:nosplit
func (p *maybeTraceableChan) get() *hchan {
	return (*hchan)(p.maybeTraceablePtr.get())
}

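// Illustrative sketch, not part of the runtime: walking a
// maybeTraceablePtr through its three states. While untracked, the GC
// no longer sees the pointer, but get() still recovers it from the
// uintptr source of truth. exampleHideFromGC is hypothetical.
func exampleHideFromGC(p *maybeTraceablePtr, v unsafe.Pointer) {
	p.set(v)           // tracked: vu != 0 && vp != nil
	p.setUntraceable() // untracked: hidden from the GC, address preserved
	_ = p.get()        // still returns v via vu
	p.setTraceable()   // tracked again: visible to the GC
}
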
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	next *sudog
	prev *sudog

	elem maybeTraceablePtr // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool

	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	success bool

	// waiters is a count of the semaRoot waiting list, excluding the
	// head of the list, clamped to a uint16 to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	waiters uint16

	parent   *sudog             // semaRoot binary tree
	waitlink *sudog             // g.waiting list or semaRoot
	waittail *sudog             // semaRoot
	c        maybeTraceableChan // channel
}

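// Illustrative sketch, not part of the runtime: the usual lifecycle of
// a sudog when a goroutine blocks. acquireSudog and releaseSudog are
// the real pool helpers (proc.go); the wait-queue manipulation and
// parking are elided. exampleBlock is hypothetical.
func exampleBlock(elem unsafe.Pointer) {
	sg := acquireSudog()
	sg.g = getg()
	sg.elem.set(elem) // may point into this goroutine's stack
	// ... enqueue sg on a wait queue under the owner's lock, park,
	// and on wakeup inspect sg.success ...
	sg.elem.set(nil)
	releaseSudog(sg)
}
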
type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// heldLockInfo gives info on a held lock and the rank of that lock
type heldLockInfo struct {
	lockAddr uintptr
	rank     lockRank
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic    *_panic // innermost panic - offset known to liblink
	_defer    *_defer // innermost defer
	m         *m      // current m; offset known to arm liblink
	sched     gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback
	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	//  1. When a channel operation wakes up a blocked goroutine, it sets param to
	//     point to the sudog of the completed blocking operation.
	//  2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//     the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//     stack may have moved in the meantime.
	//  3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//     closure in the runtime is forbidden.
	//  4. When a panic is recovered and control returns to the respective frame,
	//     param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g become blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore      int8  // ignore race detection events
	nocgocallback   bool  // whether to disable callbacks from C
	tracking        bool  // whether we're tracking this G for sched latency statistics
	trackingSeq     uint8 // used to decide whether to track this G
	trackingStamp   int64 // timestamp of when the G last started being tracked
	runnableTime    int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm         muintptr
	fipsIndicator   uint8
	fipsOnlyBypass  bool
	ditWanted       bool // set if g wants to be executed with DIT enabled
	syncSafePoint   bool // set if g is stopped at a synchronous safe point.
	runningCleanups atomic.Bool
	sig             uint32
	secret          int32 // current nesting of runtime/secret.Do calls.
	writebuf        []byte
	sigcode0        uintptr
	sigcode1        uintptr
	sigpc           uintptr
	parentGoid      uint64          // goid of goroutine that created this goroutine
	gopc            uintptr         // pc of go statement that created this goroutine
	ancestors       *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc         uintptr         // pc of goroutine function
	racectx         uintptr
	waiting         *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt         []uintptr      // cgo traceback context
	labels          unsafe.Pointer // profiler labels
	timer           *timer         // cached timer for time.Sleep
	sleepWhen       int64          // when to sleep until
	selectDone      atomic.Uint32  // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro // argument during coroutine transfers
	bubble  *synctestBubble

	// xRegs stores the extended register state if this G has been
	// asynchronously preempted.
	xRegs xRegPerG

	// Per-G tracer state.
	trace gTraceState

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// valgrindStackID is used to track what memory is used for stacks when a program is
	// built with the "valgrind" build tag, otherwise it is unused.
	valgrindStackID uintptr
}

// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8

const (
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSlots = 6
	tlsSize  = tlsSlots * goarch.PtrSize
)

// Values for m.freeWait.
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)

	// Fields whose offsets are not known to debuggers.

	procid       uint64            // for debuggers, but offset not hard-coded
	gsignal      *g                // signal-handling g
	goSigStack   gsignalStack      // Go-allocated signal handling stack
	sigmask      sigset            // storage for saved signal mask
	tls          [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	mstartfn     func()
	curg         *g       // current running goroutine
	caughtsig    guintptr // goroutine running during fatal signal
	signalSecret uint32   // whether we have secret information in our signal stack

	// p is the currently attached P for executing Go code, nil if not executing user Go code.
	//
	// A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
	// In _Gsyscall the scheduler may mutate this instead. The point of synchronization
	// is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
	// from transitioning out of _Gsyscall if it intends to mutate p.
	p puintptr

	nextp           puintptr // The next P to install before executing. Implies exclusive ownership of this P.
	oldp            puintptr // The P that was attached before executing a syscall.
	id              int64
	mallocing       int32
	throwing        throwType
	preemptoff      string // if != "", keep curg running on this m
	locks           int32
	dying           int32
	profilehz       int32
	spinning        bool // m is out of work and is actively looking for work
	blocked         bool // m is blocked on a note
	newSigstack     bool // minit on C thread called sigaltstack
	printlock       int8
	incgo           bool          // m is executing a cgo call
	isextra         bool          // m is an extra m
	isExtraInC      bool          // m is an extra m that does not have any Go frames
	isExtraInSig    bool          // m is an extra m in a signal handler
	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	needextram      bool
	g0StackAccurate bool // whether the g0 stack has accurate bounds
	traceback       uint8
	allpSnapshot    []*p          // Snapshot of allp for use after dropping P in findRunnable, nil otherwise.
	ncgocall        uint64        // number of cgo calls in total
	ncgo            int32         // number of cgo calls currently in progress
	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
	park            note
	alllink         *m // on allm
	schedlink       muintptr
	idleNode        listNodeManual
	lockedg         guintptr
	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
	lockedExt       uint32      // tracking for external LockOSThread
	lockedInt       uint32      // tracking for internal lockOSThread
	mWaitList       mWaitList   // list of runtime lock waiters
	ditEnabled      bool        // set if DIT is currently enabled on this M

	mLockProfile mLockProfile // fields relating to runtime.lock contention
	profStack    []uintptr    // used for memory/block/mutex stack traces

	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceSkip        int
	waitTraceBlockReason traceBlockReason

	syscalltick uint32
	freelink    *m // on sched.freem
	trace       mTraceState

	// These are here to avoid using the G stack so the stack can move during the call.
	libcallpc  uintptr // for cpu profiler
	libcallsp  uintptr
	libcallg   guintptr
	winsyscall winlibcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// Whether there is a pending preemption signal on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	dlogPerM

	mOS

	chacha8   chacha8rand.State
	cheaprand uint64

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo

	// self points to this M until mexit clears it, after which get returns nil.
	self mWeakPointer
}

const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)

type mPadded struct {
	m

	// Size the runtime.m structure so it fits in the 2048-byte size class, and
	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
	// bits of muintptr values available for flags, as required by
	// lock_spinbit.go.
	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}

// mWeakPointer is a "weak" pointer to an M. A weak pointer for each M is
// available as m.self. Users may copy mWeakPointer arbitrarily, and get will
// return the M if it is still live, or nil after mexit.
//
// The zero value is treated as a nil pointer.
//
// Note that get may race with M exit. A successful get will keep the m object
// alive, but the M itself may be exited and thus not actually usable.
type mWeakPointer struct {
	m *atomic.Pointer[m]
}

func newMWeakPointer(mp *m) mWeakPointer {
	w := mWeakPointer{m: new(atomic.Pointer[m])}
	w.m.Store(mp)
	return w
}

func (w mWeakPointer) get() *m {
	if w.m == nil {
		return nil
	}
	return w.m.Load()
}

// clear sets the weak pointer to nil. It cannot be used on zero value
// mWeakPointers.
func (w mWeakPointer) clear() {
	w.m.Store(nil)
}

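// Illustrative sketch, not part of the runtime: an mWeakPointer can be
// copied freely and outlive its M; get reports nil once mexit has
// cleared m.self. exampleRemember is hypothetical.
func exampleRemember(mp *m) mWeakPointer {
	w := mp.self // copy the weak pointer; imposes no lifetime obligation
	if live := w.get(); live != nil {
		// live keeps the m object reachable, but the M may already
		// be exiting, so it must not be assumed usable.
		_ = live
	}
	return w
}
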
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	// oldm is the previous m this p ran on.
	//
	// We are not associated with this m, so we have no control over its
	// lifecycle. This value is an m.self object which points to the m
	// until the m exits.
	//
	// Note that this m may be idle, running, or exiting. It should only be
	// used with mgetSpecific, which will take ownership of the m only if
	// it is idle.
	oldm mWeakPointer

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree gList

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	pinnerCache *pinner

	trace pTraceState

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64        // Nanoseconds in assistAlloc
	gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64

	// nextGCMarkWorker is the next mark worker to run. This may be set
	// during start-the-world to assign a worker to this P. The P runs this
	// worker on the next call to gcController.findRunnableGCWorker. If the
	// P runs something else or stops, it must release this worker via
	// gcController.releaseNextGCMarkWorker.
	//
	// See comment in gcBgMarkWorker about the lifetime of
	// gcBgMarkWorkerNode.
	//
	// Only accessed by this P or during STW.
	nextGCMarkWorker *gcBgMarkWorkerNode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32

	// Timer heap.
	timers timers

	// Cleanups.
	cleanups       *cleanupBlock
	cleanupsQueued uint64 // monotonic count of cleanups queued by this P

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
	gcStopTime int64

	// goroutinesCreated is the total count of goroutines created by this P.
	goroutinesCreated uint64

	// xRegs is the per-P extended register state used by asynchronous
	// preemption. This is an empty struct on platforms that don't use extended
	// register state.
	xRegs xRegPerP

	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	// that its size class is an integer multiple of the cache line size (for any of our architectures).
}

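// Illustrative sketch, not part of the runtime: the runnext ownership
// rule in action. A thief P may only CAS runnext to zero, which mirrors
// the claim step in runqgrab (proc.go). exampleStealRunnext is
// hypothetical.
func exampleStealRunnext(victim *p) *g {
	if next := victim.runnext; next != 0 {
		// Only zeroing is permitted from a non-owner P.
		if victim.runnext.cas(next, 0) {
			return next.ptr()
		}
	}
	return nil
}
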
type schedt struct {
	goidgen    atomic.Uint64
	lastpoll   atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil  atomic.Int64 // time to which current poll is sleeping
	pollingNet atomic.Int32 // 1 if some P doing non-blocking network poll

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        listHeadManual // idle m's waiting for work
	nmidle       int32          // number of idle m's waiting for work
	nmidlelocked int32          // number of locked m's waiting for work
	mnext        int64          // number of m's that have been created and next M ID
	maxmcount    int32          // maximum number of m's allowed (or die)
	nmsys        int32          // number of system m's not counted for deadlock
	nmfreed      int64          // cumulative number of freed m's

	ngsys        atomic.Int32 // number of system goroutines
	nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P but whose M is not isExtraInC

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq gQueue

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  atomic.Bool // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait atomic.Bool
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime

	customGOMAXPROCS bool // GOMAXPROCS was manually set from the environment or runtime.GOMAXPROCS

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64

	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
	// latencies, defined as the time taken by stopTheWorldWithSema to get
	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
	// stwStoppingTimeOther covers the others.
	stwStoppingTimeGC    timeHistogram
	stwStoppingTimeOther timeHistogram

	// stwTotalTimeGC/Other are distributions of stop-the-world total
	// latencies, defined as the total time from stopTheWorldWithSema to
	// startTheWorldWithSema. This is a superset of
	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
	// stwTotalTimeOther covers the others.
	stwTotalTimeGC    timeHistogram
	stwTotalTimeOther timeHistogram

	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
	// allm) is the sum of time goroutines have spent in _Grunnable and with an
	// M, but waiting for locks within the runtime. This field stores the value
	// for Ms that have exited.
	totalRuntimeLockWaitTime atomic.Int64

	// goroutinesCreated (plus the value of goroutinesCreated on each P in allp)
	// is the sum of all goroutines created by the program.
	goroutinesCreated atomic.Uint64
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions
	flag      abi.FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary

	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.

	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32

	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones      uint32  // set to ^0 to distinguish from _func
	entry     uintptr // entry of the real (the "outermost") frame
	name      string
	file      string
	line      int32
	startLine int32
}

type itab = abi.ITab

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle atomic.Bool
}

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}

// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	arg  any     // argument to panic
	link *_panic // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	repanicked  bool // whether this panic repanicked
	goexit      bool
	deferreturn bool

	gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}

// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	retpc           uintptr
	deferBitsOffset uintptr
	slotsOffset     uintptr
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGCIdle                             // "force gc (idle)"
	waitReasonUpdateGOMAXPROCSIdle                    // "GOMAXPROCS updater (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonSelect                                  // "select"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonSyncMutexLock                           // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock                        // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock                         // "sync.RWMutex.Lock"
	waitReasonSyncWaitGroupWait                       // "sync.WaitGroup.Wait"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonGCWorkerActive                          // "GC worker (active)"
	waitReasonPreempted                               // "preempted"
	waitReasonDebugCall                               // "debug call"
	waitReasonGCMarkTermination                       // "GC mark termination"
	waitReasonStoppingTheWorld                        // "stopping the world"
	waitReasonFlushProcCaches                         // "flushing proc caches"
	waitReasonTraceGoroutineStatus                    // "trace goroutine status"
	waitReasonTraceProcStatus                         // "trace proc status"
	waitReasonPageTraceFlush                          // "page trace flush"
	waitReasonCoroutine                               // "coroutine"
	waitReasonGCWeakToStrongWait                      // "GC weak to strong wait"
	waitReasonSynctestRun                             // "synctest.Run"
	waitReasonSynctestWait                            // "synctest.Wait"
	waitReasonSynctestChanReceive                     // "chan receive (durable)"
	waitReasonSynctestChanSend                        // "chan send (durable)"
	waitReasonSynctestSelect                          // "select (durable)"
	waitReasonSynctestWaitGroupWait                   // "sync.WaitGroup.Wait (durable)"
	waitReasonCleanupWait                             // "cleanup wait"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonUpdateGOMAXPROCSIdle:  "GOMAXPROCS updater (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonSyncWaitGroupWait:     "sync.WaitGroup.Wait",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
	waitReasonFlushProcCaches:       "flushing proc caches",
	waitReasonTraceGoroutineStatus:  "trace goroutine status",
	waitReasonTraceProcStatus:       "trace proc status",
	waitReasonPageTraceFlush:        "page trace flush",
	waitReasonCoroutine:             "coroutine",
	waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
	waitReasonSynctestRun:           "synctest.Run",
	waitReasonSynctestWait:          "synctest.Wait",
	waitReasonSynctestChanReceive:   "chan receive (durable)",
	waitReasonSynctestChanSend:      "chan send (durable)",
	waitReasonSynctestSelect:        "select (durable)",
	waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
	waitReasonCleanupWait:           "cleanup wait",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

// isMutexWait returns true if the goroutine is blocked because of
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
//
//go:nosplit
func (w waitReason) isMutexWait() bool {
	return w == waitReasonSyncMutexLock ||
		w == waitReasonSyncRWMutexRLock ||
		w == waitReasonSyncRWMutexLock
}

// isSyncWait returns true if the goroutine is blocked because of
// sync library primitive operations.
//
//go:nosplit
func (w waitReason) isSyncWait() bool {
	return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
}

// isChanWait is true if the goroutine is blocked because of non-nil
// channel operations or a select statement with at least one case.
//
//go:nosplit
func (w waitReason) isChanWait() bool {
	return w == waitReasonSelect ||
		w == waitReasonChanReceive ||
		w == waitReasonChanSend
}

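// Illustrative sketch, not part of the runtime: waitReason values map
// to the strings above, and the predicate helpers classify them.
// exampleClassify is hypothetical.
func exampleClassify() {
	r := waitReasonSyncMutexLock
	println(r.String())      // "sync.Mutex.Lock"
	println(r.isMutexWait()) // true
	println(r.isChanWait())  // false
}
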
func (w waitReason) isWaitingForSuspendG() bool {
	return isWaitingForSuspendG[w]
}

// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
// setting a waitReason because it needs to be able to let the suspendG
// (used by the GC and the execution tracer) take ownership of its stack.
// The G is always actually executing on the system stack in these cases.
//
// TODO(mknyszek): Consider replacing this with a new dedicated G status.
var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
	waitReasonStoppingTheWorld:      true,
	waitReasonGCMarkTermination:     true,
	waitReasonGarbageCollection:     true,
	waitReasonGarbageCollectionScan: true,
	waitReasonTraceGoroutineStatus:  true,
	waitReasonTraceProcStatus:       true,
	waitReasonPageTraceFlush:        true,
	waitReasonGCAssistMarking:       true,
	waitReasonGCWorkerActive:        true,
	waitReasonFlushProcCaches:       true,
}

func (w waitReason) isIdleInSynctest() bool {
	return isIdleInSynctest[w]
}

// isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.
var isIdleInSynctest = [len(waitReasonStrings)]bool{
	waitReasonChanReceiveNilChan:    true,
	waitReasonChanSendNilChan:       true,
	waitReasonSelectNoCases:         true,
	waitReasonSleep:                 true,
	waitReasonSyncCondWait:          true,
	waitReasonSynctestWaitGroupWait: true,
	waitReasonCoroutine:             true,
	waitReasonSynctestRun:           true,
	waitReasonSynctestWait:          true,
	waitReasonSynctestChanReceive:   true,
	waitReasonSynctestChanSend:      true,
	waitReasonSynctestSelect:        true,
}

var (
	// Linked-list of all Ms. Written under sched.lock, read atomically.
	allm *m

	gomaxprocs    int32
	numCPUStartup int32
	forcegc       forcegcstate
	sched         schedt
	newprocs      int32
)

var (
	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	allpLock mutex

	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	allp []*p

	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must set the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	idlepMask pMask

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.
	//
	// Ideally, the timer mask would be kept immediately consistent on any timer
	// operations. Unfortunately, updating a shared global data structure in the
	// timer hot path adds too much overhead in applications frequently switching
	// between no timers and some timers.
	//
	// As a compromise, the timer mask is updated only on pidleget / pidleput. A
	// running P (returned by pidleget) may add a timer at any time, so its mask
	// must be set. An idle P (passed to pidleput) cannot add new timers while
	// idle, so if it has no timers at that time, its mask may be cleared.
	//
	// Thus, we get the following effects on timer-stealing in findRunnable:
	//
	//   - Idle Ps with no timers when they go idle are never checked in findRunnable
	//     (for work- or timer-stealing; this is the ideal case).
	//   - Running Ps must always be checked.
	//   - Idle Ps whose timers are stolen must continue to be checked until they run
	//     again, even after timer expiration.
	//
	// When the P starts running again, the mask should be set, as a timer may be
	// added at any time.
	//
	// TODO(prattmic): Additional targeted updates may improve the above cases.
	// e.g., updating the mask when stealing a timer.
	timerpMask pMask
)

// goarmsoftfp is used by runtime/cgo assembly.
//
//go:linkname goarmsoftfp

var (
	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
)

// set by cmd/link on arm systems
// accessed using linkname by internal/runtime/atomic.
//
// goarm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/creativeprojects/go-selfupdate
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goarm
var (
	goarm       uint8
	goarmsoftfp uint8
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"

// getcallerfp returns the frame pointer of the caller of the caller
// of this function.
//
//go:nosplit
//go:noinline
func getcallerfp() uintptr {
	fp := getfp() // This frame's FP.
	if fp != 0 {
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's FP.
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's caller's FP.
	}
	return fp
}