Source file src/runtime/runtime2.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/chacha8rand"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M (g.m is valid) and it usually has a P
	// (g.m.p is valid), but there are small windows of time where
	// it might not, namely upon entering and exiting _Gsyscall.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	// It may have a P attached, but it does not own it. Code
	// executing in this state must not touch g.m.p.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gleaked represents a leaked goroutine caught by the GC.
	_Gleaked // 10

	// _Gdeadextra is a _Gdead goroutine that's attached to an extra M
	// used for cgo callbacks.
	_Gdeadextra // 11

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	_Gscanleaked    = _Gscan + _Gleaked    // 0x100a
	_Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)

const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), or _Pgcstop (to halt for the GC). The M may also hand
	// ownership of the P off directly to another M (for example,
	// to schedule a locked G).
	_Prunning

	// _Psyscall_unused is a now-defunct state for a P. A P is
	// identified as "in a system call" by looking at the goroutine's
	// state.
	_Psyscall_unused

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
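
// Illustrative sketch (not part of the original file): runtime code guards
// shared state with these mutexes via the runtime's own lock and unlock
// functions. The variable and function names below are placeholders, not
// real runtime symbols.
//
//	var examplelock mutex // hypothetical
//
//	func exampleUpdate() {
//		lock(&examplelock)
//		// ... mutate the state protected by examplelock ...
//		unlock(&examplelock)
//	}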

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

//go:nosplit
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
//  1. Never hold an muintptr locally across a safe point.
//
//  2. Any muintptr in the heap must be owned by the M itself so it can
//     ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}

// maybeTraceablePtr is a special pointer that is conditionally trackable
// by the GC. It consists of an address as a uintptr (vu) and a pointer
// to a data element (vp).
//
// maybeTraceablePtr values can be in one of three states:
//  1. Unset: vu == 0 && vp == nil
//  2. Untracked: vu != 0 && vp == nil
//  3. Tracked: vu != 0 && vp != nil
//
// Do not set fields manually. Use methods instead.
// Extend this type with additional methods if needed.
type maybeTraceablePtr struct {
	vp unsafe.Pointer // For liveness only.
	vu uintptr        // Source of truth.
}

// setUntraceable unsets the pointer but preserves the address.
// This is used to hide the pointer from the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setUntraceable() {
	p.vp = nil
}

// setTraceable resets the pointer to the stored address.
// This is used to make the pointer visible to the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setTraceable() {
	p.vp = unsafe.Pointer(p.vu)
}

// set sets the pointer to the data element and updates the address.
//
//go:nosplit
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
	p.vp = v
	p.vu = uintptr(v)
}

// get retrieves the pointer to the data element.
//
//go:nosplit
func (p *maybeTraceablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(p.vu)
}

// uintptr returns the uintptr address of the pointer.
//
//go:nosplit
func (p *maybeTraceablePtr) uintptr() uintptr {
	return p.vu
}

// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
// to track hchan pointers.
//
// Do not set fields manually. Use methods instead.
type maybeTraceableChan struct {
	maybeTraceablePtr
}

//go:nosplit
func (p *maybeTraceableChan) set(c *hchan) {
	p.maybeTraceablePtr.set(unsafe.Pointer(c))
}

//go:nosplit
func (p *maybeTraceableChan) get() *hchan {
	return (*hchan)(p.maybeTraceablePtr.get())
}

// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	next *sudog
	prev *sudog

	elem maybeTraceablePtr // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool

	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	success bool

	// waiters is a count of semaRoot waiting list other than head of list,
	// clamped to a uint16 to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	waiters uint16

	parent   *sudog // semaRoot binary tree
	waitlink *sudog // g.waiting list or semaRoot
	waittail *sudog // semaRoot
	c        maybeTraceableChan // channel
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}
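
// Illustrative sketch (not part of the original file): channel and semaphore
// code obtains sudogs from the pool described above rather than allocating
// them directly. The local variable names are placeholders; see the channel
// code in chan.go for real usage.
//
//	sg := acquireSudog()
//	sg.g = getg()
//	sg.elem.set(unsafe.Pointer(&v)) // v is a hypothetical local holding the data element
//	// ... enqueue sg on the wait queue and park; after being woken:
//	releaseSudog(sg)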

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// heldLockInfo gives info on a held lock and the rank of that lock.
type heldLockInfo struct {
	lockAddr uintptr
	rank     lockRank
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic    *_panic // innermost panic - offset known to liblink
	_defer    *_defer // innermost defer
	m         *m      // current m; offset known to arm liblink
	sched     gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback
	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g became blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore      int8  // ignore race detection events
	nocgocallback   bool  // whether to disable callbacks from C
	tracking        bool  // whether we're tracking this G for sched latency statistics
	trackingSeq     uint8 // used to decide whether to track this G
	trackingStamp   int64 // timestamp of when the G last started being tracked
	runnableTime    int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm         muintptr
	fipsIndicator   uint8
	fipsOnlyBypass  bool
	syncSafePoint   bool // set if g is stopped at a synchronous safe point.
	runningCleanups atomic.Bool
	sig             uint32
	secret          int32 // current nesting of runtime/secret.Do calls.
	writebuf        []byte
	sigcode0        uintptr
	sigcode1        uintptr
	sigpc           uintptr
	parentGoid      uint64          // goid of goroutine that created this goroutine
	gopc            uintptr         // pc of go statement that created this goroutine
	ancestors       *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc         uintptr         // pc of goroutine function
	racectx         uintptr
	waiting         *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt         []uintptr      // cgo traceback context
	labels          unsafe.Pointer // profiler labels
	timer           *timer         // cached timer for time.Sleep
	sleepWhen       int64          // when to sleep until
	selectDone      atomic.Uint32  // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro // argument during coroutine transfers
	bubble  *synctestBubble

	// xRegs stores the extended register state if this G has been
	// asynchronously preempted.
	xRegs xRegPerG

	// Per-G tracer state.
	trace gTraceState

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// valgrindStackID is used to track what memory is used for stacks when a program is
	// built with the "valgrind" build tag, otherwise it is unused.
	valgrindStackID uintptr
}

// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8

const (
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSlots = 6
	tlsSize  = tlsSlots * goarch.PtrSize
)

// Values for m.freeWait.
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)
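
// Illustrative sketch (not part of the original file): code that inspects a
// G's state reads atomicstatus through readgstatus and masks off the _Gscan
// bit to recover the underlying state, since a scanner may hold the scan bit
// at any time.
//
//	gp := getg()
//	if status := readgstatus(gp); status&^_Gscan == _Grunning {
//		// gp owns its stack and may be running user code.
//	}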

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)

	// Fields whose offsets are not known to debuggers.

	procid       uint64            // for debuggers, but offset not hard-coded
	gsignal      *g                // signal-handling g
	goSigStack   gsignalStack      // Go-allocated signal handling stack
	sigmask      sigset            // storage for saved signal mask
	tls          [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	mstartfn     func()
	curg         *g       // current running goroutine
	caughtsig    guintptr // goroutine running during fatal signal
	signalSecret uint32   // whether we have secret information in our signal stack

	// p is the currently attached P for executing Go code, nil if not executing user Go code.
	//
	// A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
	// In _Gsyscall the scheduler may mutate this instead. The point of synchronization
	// is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
	// from transitioning out of _Gsyscall if it intends to mutate p.
	p puintptr

	nextp           puintptr // The next P to install before executing. Implies exclusive ownership of this P.
	oldp            puintptr // The P that was attached before executing a syscall.
	id              int64
	mallocing       int32
	throwing        throwType
	preemptoff      string // if != "", keep curg running on this m
	locks           int32
	dying           int32
	profilehz       int32
	spinning        bool // m is out of work and is actively looking for work
	blocked         bool // m is blocked on a note
	newSigstack     bool // minit on C thread called sigaltstack
	printlock       int8
	incgo           bool          // m is executing a cgo call
	isextra         bool          // m is an extra m
	isExtraInC      bool          // m is an extra m that does not have any Go frames
	isExtraInSig    bool          // m is an extra m in a signal handler
	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	needextram      bool
	g0StackAccurate bool // whether the g0 stack has accurate bounds
	traceback       uint8
	allpSnapshot    []*p          // Snapshot of allp for use after dropping P in findRunnable, nil otherwise.
	ncgocall        uint64        // number of cgo calls in total
	ncgo            int32         // number of cgo calls currently in progress
	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
	park            note
	alllink         *m // on allm
	schedlink       muintptr
	idleNode        listNodeManual
	lockedg         guintptr
	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
	lockedExt       uint32      // tracking for external LockOSThread
	lockedInt       uint32      // tracking for internal lockOSThread
	mWaitList       mWaitList   // list of runtime lock waiters

	mLockProfile mLockProfile // fields relating to runtime.lock contention
	profStack    []uintptr    // used for memory/block/mutex stack traces

	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceSkip        int
	waitTraceBlockReason traceBlockReason

	syscalltick uint32
	freelink    *m // on sched.freem
	trace       mTraceState

	// These are here to avoid using the G stack so the stack can move during the call.
	libcallpc  uintptr // for cpu profiler
	libcallsp  uintptr
	libcallg   guintptr
	winsyscall winlibcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// Whether this is a pending preemption signal on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	dlogPerM

	mOS

	chacha8   chacha8rand.State
	cheaprand uint64

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo

	// self points to this M until mexit clears it, after which get returns nil.
	self mWeakPointer
}

const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)

type mPadded struct {
	m

	// Size the runtime.m structure so it fits in the 2048-byte size class, and
	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
	// bits of muintptr values available for flags, as required by
	// lock_spinbit.go.
	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}

// mWeakPointer is a "weak" pointer to an M. A weak pointer for each M is
// available as m.self. Users may copy mWeakPointer arbitrarily, and get will
// return the M if it is still live, or nil after mexit.
//
// The zero value is treated as a nil pointer.
//
// Note that get may race with M exit. A successful get will keep the m object
// alive, but the M itself may be exited and thus not actually usable.
type mWeakPointer struct {
	m *atomic.Pointer[m]
}

func newMWeakPointer(mp *m) mWeakPointer {
	w := mWeakPointer{m: new(atomic.Pointer[m])}
	w.m.Store(mp)
	return w
}

func (w mWeakPointer) get() *m {
	if w.m == nil {
		return nil
	}
	return w.m.Load()
}

// clear sets the weak pointer to nil. It cannot be used on zero value
// mWeakPointers.
func (w mWeakPointer) clear() {
	w.m.Store(nil)
}

type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	// oldm is the previous m this p ran on.
	//
	// We are not associated with this m, so we have no control over its
	// lifecycle. This value is an m.self object which points to the m
	// until the m exits.
	//
	// Note that this m may be idle, running, or exiting. It should only be
	// used with mgetSpecific, which will take ownership of the m only if
	// it is idle.
	oldm mWeakPointer

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree gList

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	pinnerCache *pinner

	trace pTraceState

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64        // Nanoseconds in assistAlloc
	gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64

	// nextGCMarkWorker is the next mark worker to run. This may be set
	// during start-the-world to assign a worker to this P. The P runs this
	// worker on the next call to gcController.findRunnableGCWorker. If the
	// P runs something else or stops, it must release this worker via
	// gcController.releaseNextGCMarkWorker.
	//
	// See comment in gcBgMarkWorker about the lifetime of
	// gcBgMarkWorkerNode.
	//
	// Only accessed by this P or during STW.
	nextGCMarkWorker *gcBgMarkWorkerNode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32

	// Timer heap.
	timers timers

	// Cleanups.
	cleanups       *cleanupBlock
	cleanupsQueued uint64 // monotonic count of cleanups queued by this P

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
	gcStopTime int64

	// goroutinesCreated is the total count of goroutines created by this P.
	goroutinesCreated uint64

	// xRegs is the per-P extended register state used by asynchronous
	// preemption. This is an empty struct on platforms that don't use extended
	// register state.
	xRegs xRegPerP

	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	// that its size class is an integer multiple of the cache line size (for any of our architectures).
}

type schedt struct {
	goidgen    atomic.Uint64
	lastpoll   atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil  atomic.Int64 // time to which current poll is sleeping
	pollingNet atomic.Int32 // 1 if some P doing non-blocking network poll

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        listHeadManual // idle m's waiting for work
	nmidle       int32          // number of idle m's waiting for work
	nmidlelocked int32          // number of locked m's waiting for work
	mnext        int64          // number of m's that have been created and next M ID
	maxmcount    int32          // maximum number of m's allowed (or die)
	nmsys        int32          // number of system m's not counted for deadlock
	nmfreed      int64          // cumulative number of freed m's

	ngsys        atomic.Int32 // number of system goroutines
	nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq gQueue

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  atomic.Bool // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait atomic.Bool
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime

	customGOMAXPROCS bool // GOMAXPROCS was manually set from the environment or runtime.GOMAXPROCS

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64

	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
	// latencies, defined as the time taken by stopTheWorldWithSema to get
	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
	// stwStoppingTimeOther covers the others.
	stwStoppingTimeGC    timeHistogram
	stwStoppingTimeOther timeHistogram

	// stwTotalTimeGC/Other are distributions of stop-the-world total
	// latencies, defined as the total time from stopTheWorldWithSema to
	// startTheWorldWithSema. This is a superset of
	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
	// stwTotalTimeOther covers the others.
	stwTotalTimeGC    timeHistogram
	stwTotalTimeOther timeHistogram

	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
	// allm) is the sum of time goroutines have spent in _Grunnable and with an
	// M, but waiting for locks within the runtime. This field stores the value
	// for Ms that have exited.
	totalRuntimeLockWaitTime atomic.Int64

	// goroutinesCreated (plus the value of goroutinesCreated on each P in allp)
	// is the sum of all goroutines created by the program.
	goroutinesCreated atomic.Uint64
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify  = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                // if signal.Notify doesn't take it, exit quietly
	_SigThrow               // if signal.Notify doesn't take it, exit loudly
	_SigPanic               // if the signal is from the kernel, panic
	_SigDefault             // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit              // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack            // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock             // always unblock; see blockableSig
	_SigIgn                 // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions
	flag      abi.FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary

	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.

	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32

	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones      uint32  // set to ^0 to distinguish from _func
	entry     uintptr // entry of the real (the "outermost") frame
	name      string
	file      string
	line      int32
	startLine int32
}

type itab = abi.ITab

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle atomic.Bool
}
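
// Illustrative sketch (not part of the original file): lfnode is embedded at
// the start of types pushed onto an lfstack, such as gcBgMarkWorkerNode
// (defined in mgc.go), whose nodes flow through gcBgMarkWorkerPool roughly
// like this:
//
//	if node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()); node != nil {
//		// ... use the parked worker described by node ...
//		gcBgMarkWorkerPool.push(&node.node)
//	}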

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}

// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	arg  any     // argument to panic
	link *_panic // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	repanicked  bool // whether this panic repanicked
	goexit      bool
	deferreturn bool

	gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}

// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	retpc           uintptr
	deferBitsOffset uintptr
	slotsOffset     uintptr
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero waitReason = iota       // ""
	waitReasonGCAssistMarking              // "GC assist marking"
	waitReasonIOWait                       // "IO wait"
	waitReasonDumpingHeap                  // "dumping heap"
	waitReasonGarbageCollection            // "garbage collection"
	waitReasonGarbageCollectionScan        // "garbage collection scan"
	waitReasonPanicWait                    // "panicwait"
	waitReasonGCAssistWait                 // "GC assist wait"
	waitReasonGCSweepWait                  // "GC sweep wait"
	waitReasonGCScavengeWait               // "GC scavenge wait"
	waitReasonFinalizerWait                // "finalizer wait"
	waitReasonForceGCIdle                  // "force gc (idle)"
	waitReasonUpdateGOMAXPROCSIdle         // "GOMAXPROCS updater (idle)"
	waitReasonSemacquire                   // "semacquire"
	waitReasonSleep                        // "sleep"
	waitReasonChanReceiveNilChan           // "chan receive (nil chan)"
	waitReasonChanSendNilChan              // "chan send (nil chan)"
	waitReasonSelectNoCases                // "select (no cases)"
	waitReasonSelect                       // "select"
	waitReasonChanReceive                  // "chan receive"
	waitReasonChanSend                     // "chan send"
	waitReasonSyncCondWait                 // "sync.Cond.Wait"
	waitReasonSyncMutexLock                // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock             // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock              // "sync.RWMutex.Lock"
	waitReasonSyncWaitGroupWait            // "sync.WaitGroup.Wait"
	waitReasonTraceReaderBlocked           // "trace reader (blocked)"
	waitReasonWaitForGCCycle               // "wait for GC cycle"
	waitReasonGCWorkerIdle                 // "GC worker (idle)"
	waitReasonGCWorkerActive               // "GC worker (active)"
	waitReasonPreempted                    // "preempted"
	waitReasonDebugCall                    // "debug call"
	waitReasonGCMarkTermination            // "GC mark termination"
	waitReasonStoppingTheWorld             // "stopping the world"
	waitReasonFlushProcCaches              // "flushing proc caches"
	waitReasonTraceGoroutineStatus         // "trace goroutine status"
	waitReasonTraceProcStatus              // "trace proc status"
	waitReasonPageTraceFlush               // "page trace flush"
	waitReasonCoroutine                    // "coroutine"
	waitReasonGCWeakToStrongWait           // "GC weak to strong wait"
	waitReasonSynctestRun                  // "synctest.Run"
	waitReasonSynctestWait                 // "synctest.Wait"
	waitReasonSynctestChanReceive          // "chan receive (durable)"
	waitReasonSynctestChanSend             // "chan send (durable)"
	waitReasonSynctestSelect               // "select (durable)"
	waitReasonSynctestWaitGroupWait        // "sync.WaitGroup.Wait (durable)"
	waitReasonCleanupWait                  // "cleanup wait"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonUpdateGOMAXPROCSIdle:  "GOMAXPROCS updater (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonSyncWaitGroupWait:     "sync.WaitGroup.Wait",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
	waitReasonFlushProcCaches:       "flushing proc caches",
	waitReasonTraceGoroutineStatus:  "trace goroutine status",
	waitReasonTraceProcStatus:       "trace proc status",
	waitReasonPageTraceFlush:        "page trace flush",
	waitReasonCoroutine:             "coroutine",
	waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
	waitReasonSynctestRun:           "synctest.Run",
	waitReasonSynctestWait:          "synctest.Wait",
	waitReasonSynctestChanReceive:   "chan receive (durable)",
	waitReasonSynctestChanSend:      "chan send (durable)",
	waitReasonSynctestSelect:        "select (durable)",
	waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
	waitReasonCleanupWait:           "cleanup wait",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

// isMutexWait returns true if the goroutine is blocked because of
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
//
//go:nosplit
func (w waitReason) isMutexWait() bool {
	return w == waitReasonSyncMutexLock ||
		w == waitReasonSyncRWMutexRLock ||
		w == waitReasonSyncRWMutexLock
}

// isSyncWait returns true if the goroutine is blocked because of
// sync library primitive operations.
//
//go:nosplit
func (w waitReason) isSyncWait() bool {
	return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
}

// isChanWait is true if the goroutine is blocked because of non-nil
// channel operations or a select statement with at least one case.
//
//go:nosplit
func (w waitReason) isChanWait() bool {
	return w == waitReasonSelect ||
		w == waitReasonChanReceive ||
		w == waitReasonChanSend
}

func (w waitReason) isWaitingForSuspendG() bool {
	return isWaitingForSuspendG[w]
}
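
// Illustrative sketch (not part of the original file): a waitReason is
// supplied when a goroutine parks and is later reported by tracebacks,
// goroutine profiles, and the execution tracer. The nil unlock function and
// lock argument below are placeholders; real callers usually pass both.
//
//	gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1)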

// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
// setting a waitReason because it needs to be able to let the suspendG
// (used by the GC and the execution tracer) take ownership of its stack.
// The G is always actually executing on the system stack in these cases.
//
// TODO(mknyszek): Consider replacing this with a new dedicated G status.
var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
	waitReasonStoppingTheWorld:      true,
	waitReasonGCMarkTermination:     true,
	waitReasonGarbageCollection:     true,
	waitReasonGarbageCollectionScan: true,
	waitReasonTraceGoroutineStatus:  true,
	waitReasonTraceProcStatus:       true,
	waitReasonPageTraceFlush:        true,
	waitReasonGCAssistMarking:       true,
	waitReasonGCWorkerActive:        true,
	waitReasonFlushProcCaches:       true,
}

func (w waitReason) isIdleInSynctest() bool {
	return isIdleInSynctest[w]
}

// isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.
var isIdleInSynctest = [len(waitReasonStrings)]bool{
	waitReasonChanReceiveNilChan:    true,
	waitReasonChanSendNilChan:       true,
	waitReasonSelectNoCases:         true,
	waitReasonSleep:                 true,
	waitReasonSyncCondWait:          true,
	waitReasonSynctestWaitGroupWait: true,
	waitReasonCoroutine:             true,
	waitReasonSynctestRun:           true,
	waitReasonSynctestWait:          true,
	waitReasonSynctestChanReceive:   true,
	waitReasonSynctestChanSend:      true,
	waitReasonSynctestSelect:        true,
}

var (
	// Linked-list of all Ms. Written under sched.lock, read atomically.
	allm *m

	gomaxprocs    int32
	numCPUStartup int32
	forcegc       forcegcstate
	sched         schedt
	newprocs      int32
)

var (
	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	allpLock mutex

	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	allp []*p

	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must set the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	idlepMask pMask

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.
	//
	// Ideally, the timer mask would be kept immediately consistent on any timer
	// operations. Unfortunately, updating a shared global data structure in the
	// timer hot path adds too much overhead in applications frequently switching
	// between no timers and some timers.
	//
	// As a compromise, the timer mask is updated only on pidleget / pidleput. A
	// running P (returned by pidleget) may add a timer at any time, so its mask
	// must be set. An idle P (passed to pidleput) cannot add new timers while
	// idle, so if it has no timers at that time, its mask may be cleared.
	//
	// Thus, we get the following effects on timer-stealing in findRunnable:
	//
	//   - Idle Ps with no timers when they go idle are never checked in findRunnable
	//     (for work- or timer-stealing; this is the ideal case).
	//   - Running Ps must always be checked.
	//   - Idle Ps whose timers are stolen must continue to be checked until they run
	//     again, even after timer expiration.
	//
	// When the P starts running again, the mask should be set, as a timer may be
	// added at any time.
	//
	// TODO(prattmic): Additional targeted updates may improve the above cases.
	// e.g., updating the mask when stealing a timer.
	timerpMask pMask
)

// goarmsoftfp is used by runtime/cgo assembly.
//
//go:linkname goarmsoftfp

var (
	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
)

// set by cmd/link on arm systems
// accessed using linkname by internal/runtime/atomic.
//
// goarm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/creativeprojects/go-selfupdate
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goarm
var (
	goarm       uint8
	goarmsoftfp uint8
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"

// getcallerfp returns the frame pointer of the caller of the caller
// of this function.
//
//go:nosplit
//go:noinline
func getcallerfp() uintptr {
	fp := getfp() // This frame's FP.
	if fp != 0 {
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's FP.
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's caller's FP.
	}
	return fp
}
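
// Illustrative sketch (not part of the original file): getcallerfp relies on
// the frame-pointer chain maintained on framepointer-enabled architectures,
// where each frame's FP slot holds the caller's saved FP. Following the chain
// one link at a time walks successively older frames:
//
//	fp := getcallerfp()
//	for fp != 0 {
//		// ... inspect the frame at fp ...
//		fp = *(*uintptr)(unsafe.Pointer(fp)) // step to the next older frame
//	}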