Source file src/runtime/runtime2.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/chacha8rand"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M (g.m is valid) and it usually has a P
	// (g.m.p is valid), but there are small windows of time where
	// it might not, namely upon entering and exiting _Gsyscall.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	// It may have a P attached, but it does not own it. Code
	// executing in this state must not touch g.m.p.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gleaked represents a leaked goroutine caught by the GC.
	_Gleaked // 10

	// _Gdeadextra is a _Gdead goroutine that's attached to an extra M
	// used for cgo callbacks.
	_Gdeadextra // 11

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	_Gscanleaked    = _Gscan + _Gleaked    // 0x100a
	_Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)
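// The following sketch is illustrative only (readgstatus, in proc.go, is the
// real accessor). It shows how the _Gscan bit composes with the base G states
// above: each scan variant is base+_Gscan, so masking the bit off recovers
// the state the goroutine will return to when the scan completes.
//
//	status := gp.atomicstatus.Load()
//	scanning := status&_Gscan != 0
//	base := status &^ _Gscan // e.g., _Gscanwaiting &^ _Gscan == _Gwaiting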
const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), or _Pgcstop (to halt for the GC). The M may also hand
	// ownership of the P off directly to another M (for example,
	// to schedule a locked G).
	_Prunning

	// _Psyscall_unused is a now-defunct state for a P. A P is
	// identified as "in a system call" by looking at the goroutine's
	// state.
	_Psyscall_unused

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.
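// As an illustration of the mechanism (a sketch, not runtime code): because
// guintptr is an integer type, the compiler emits a plain store for it,
// whereas assigning through a *g-typed field would emit a write barrier. The
// conversion through unsafe.Pointer is what hides the pointer from the GC.
//
//	var head guintptr // e.g., the head of some scheduling list
//	head.set(gp)      // plain store, no write barrier
//	gp2 := head.ptr() // recover the *g when needed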
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

//go:nosplit
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
//  1. Never hold an muintptr locally across a safe point.
//
//  2. Any muintptr in the heap must be owned by the M itself so it can
//     ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
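// The cas method above is what makes guintptr usable in lock-free scheduling
// lists. A hedged sketch of the typical pattern (illustration only; "head"
// stands in for whatever list is being maintained):
//
//	var head guintptr
//	for {
//		old := head
//		gp.schedlink = old
//		if head.cas(old, gp.guintptr()) {
//			break
//		}
//	}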
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}

// maybeTraceablePtr is a special pointer that is conditionally trackable
// by the GC. It consists of an address as a uintptr (vu) and a pointer
// to a data element (vp).
//
// maybeTraceablePtr values can be in one of three states:
//  1. Unset: vu == 0 && vp == nil
//  2. Untracked: vu != 0 && vp == nil
//  3. Tracked: vu != 0 && vp != nil
//
// Do not set fields manually. Use methods instead.
// Extend this type with additional methods if needed.
type maybeTraceablePtr struct {
	vp unsafe.Pointer // For liveness only.
	vu uintptr        // Source of truth.
}

// setUntraceable unsets the pointer but preserves the address.
// This is used to hide the pointer from the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setUntraceable() {
	p.vp = nil
}

// setTraceable resets the pointer to the stored address.
// This is used to make the pointer visible to the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setTraceable() {
	p.vp = unsafe.Pointer(p.vu)
}

// set sets the pointer to the data element and updates the address.
//
//go:nosplit
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
	p.vp = v
	p.vu = uintptr(v)
}

// get retrieves the pointer to the data element.
//
//go:nosplit
func (p *maybeTraceablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(p.vu)
}

// uintptr returns the uintptr address of the pointer.
//
//go:nosplit
func (p *maybeTraceablePtr) uintptr() uintptr {
	return p.vu
}

// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
// to track hchan pointers.
//
// Do not set fields manually. Use methods instead.
type maybeTraceableChan struct {
	maybeTraceablePtr
}

//go:nosplit
func (p *maybeTraceableChan) set(c *hchan) {
	p.maybeTraceablePtr.set(unsafe.Pointer(c))
}

//go:nosplit
func (p *maybeTraceableChan) get() *hchan {
	return (*hchan)(p.maybeTraceablePtr.get())
}
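// An illustrative walk through the three documented states (a sketch, not
// runtime code; v stands for any pointer being tracked):
//
//	var mp maybeTraceablePtr
//	mp.set(v)           // Tracked:   vu != 0, vp != nil (GC sees it)
//	mp.setUntraceable() // Untracked: vu != 0, vp == nil (hidden from GC)
//	mp.setTraceable()   // Tracked again, restored from the saved address
//	_ = mp.get()        // always answers from vu, the source of truth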
// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	next *sudog
	prev *sudog

	elem maybeTraceablePtr // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool

	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	success bool

	// waiters counts the entries in the semaRoot waiting list other than
	// the head of the list, clamped to a uint16 to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	waiters uint16

	parent   *sudog             // semaRoot binary tree
	waitlink *sudog             // g.waiting list or semaRoot
	waittail *sudog             // semaRoot
	c        maybeTraceableChan // channel
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}
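// For example (sketch only): with the half-open [lo, hi) convention above,
// the usable size of a stack and a bounds check for a stack pointer are
// simply:
//
//	size := s.hi - s.lo
//	onStack := s.lo <= sp && sp < s.hi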
// heldLockInfo gives info on a held lock and the rank of that lock
type heldLockInfo struct {
	lockAddr uintptr
	rank     lockRank
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic    *_panic // innermost panic - offset known to liblink
	_defer    *_defer // innermost defer
	m         *m      // current m; offset known to arm liblink
	sched     gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback
	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g became blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore    int8  // ignore race detection events
	nocgocallback bool  // whether callbacks from C are disallowed
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm       muintptr
	fipsIndicator uint8
	syncSafePoint bool // set if g is stopped at a synchronous safe point.
	runningCleanups atomic.Bool
	sig             uint32
	writebuf        []byte
	sigcode0        uintptr
	sigcode1        uintptr
	sigpc           uintptr
	parentGoid      uint64          // goid of goroutine that created this goroutine
	gopc            uintptr         // pc of go statement that created this goroutine
	ancestors       *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc         uintptr         // pc of goroutine function
	racectx         uintptr
	waiting         *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt         []uintptr      // cgo traceback context
	labels          unsafe.Pointer // profiler labels
	timer           *timer         // cached timer for time.Sleep
	sleepWhen       int64          // when to sleep until
	selectDone      atomic.Uint32  // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro // argument during coroutine transfers
	bubble  *synctestBubble

	// xRegs stores the extended register state if this G has been
	// asynchronously preempted.
	xRegs xRegPerG

	// Per-G tracer state.
	trace gTraceState

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// valgrindStackID is used to track what memory is used for stacks when a program is
	// built with the "valgrind" build tag, otherwise it is unused.
	valgrindStackID uintptr
}

// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8

const (
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSlots = 6
	tlsSize  = tlsSlots * goarch.PtrSize
)

// Values for m.freeWait.
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)
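// A hedged sketch of the check that the stackguard0 comments above describe
// (the real prologue is emitted by the compiler as assembly, not written as
// Go): every function with a stack check compares SP against stackguard0, so
// setting stackguard0 to stackPreempt forces the next prologue into
// morestack, where the preemption request is recognized.
//
//	if sp < gp.stackguard0 {
//		// morestack: either grow the stack, or, if
//		// stackguard0 == stackPreempt, yield to the scheduler.
//	}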
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)

	// Fields whose offsets are not known to debuggers.

	procid     uint64            // for debuggers, but offset not hard-coded
	gsignal    *g                // signal-handling g
	goSigStack gsignalStack      // Go-allocated signal handling stack
	sigmask    sigset            // storage for saved signal mask
	tls        [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	mstartfn   func()
	curg       *g       // current running goroutine
	caughtsig  guintptr // goroutine running during fatal signal

	// p is the currently attached P for executing Go code, nil if not executing user Go code.
	//
	// A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
	// In _Gsyscall the scheduler may mutate this instead. The point of synchronization
	// is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
	// from transitioning out of _Gsyscall if it intends to mutate p.
	p puintptr

	nextp           puintptr // The next P to install before executing. Implies exclusive ownership of this P.
	oldp            puintptr // The P that was attached before executing a syscall.
	id              int64
	mallocing       int32
	throwing        throwType
	preemptoff      string // if != "", keep curg running on this m
	locks           int32
	dying           int32
	profilehz       int32
	spinning        bool // m is out of work and is actively looking for work
	blocked         bool // m is blocked on a note
	newSigstack     bool // minit on C thread called sigaltstack
	printlock       int8
	incgo           bool          // m is executing a cgo call
	isextra         bool          // m is an extra m
	isExtraInC      bool          // m is an extra m that does not have any Go frames
	isExtraInSig    bool          // m is an extra m in a signal handler
	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	needextram      bool
	g0StackAccurate bool // whether the g0 stack has accurate bounds
	traceback       uint8
	allpSnapshot    []*p          // Snapshot of allp for use after dropping P in findRunnable, nil otherwise.
	ncgocall        uint64        // number of cgo calls in total
	ncgo            int32         // number of cgo calls currently in progress
	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
	park            note
	alllink         *m // on allm
	schedlink       muintptr
	lockedg         guintptr
	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
	lockedExt       uint32      // tracking for external LockOSThread
	lockedInt       uint32      // tracking for internal lockOSThread
	mWaitList       mWaitList   // list of runtime lock waiters

	mLockProfile mLockProfile // fields relating to runtime.lock contention
	profStack    []uintptr    // used for memory/block/mutex stack traces

	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceSkip        int
	waitTraceBlockReason traceBlockReason

	syscalltick uint32
	freelink    *m // on sched.freem
	trace       mTraceState

	// These are here to avoid using the G stack so the stack can move during the call.
	libcallpc  uintptr // for cpu profiler
	libcallsp  uintptr
	libcallg   guintptr
	winsyscall winlibcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// signalPending reports whether a preemption signal is pending on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	dlogPerM

	mOS

	chacha8   chacha8rand.State
	cheaprand uint64

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo
}
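// For context on the wait* fields above (a sketch; gopark and park_m live in
// proc.go): gopark stashes its arguments on the M, and park_m reads them back
// once execution has switched to g0 and there is no user stack to carry them.
//
//	mp := acquirem()
//	mp.waitlock = lock
//	mp.waitunlockf = unlockf
//	// ... later, park_m calls mp.waitunlockf(gp, mp.waitlock)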
const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)

type mPadded struct {
	m

	// Size the runtime.m structure so it fits in the 2048-byte size class, and
	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
	// bits of muintptr values available for flags, as required by
	// lock_spinbit.go.
	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}
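// The padding arithmetic works out as follows (illustration only): objects in
// the 2048-byte size class are 2048-byte aligned, so an *m allocated there
// has its low 11 bits clear (2048 == 1<<11), and muintptr values can stash
// flags in those bits. On non-wasm platforms the padding fills the slot
// exactly; the (1 - goarch.IsWasm) factor zeroes it out on wasm.
//
//	mallocHeaderSize + unsafe.Sizeof(m{}) + padding + mRedZoneSize == 2048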
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree gList

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	pinnerCache *pinner

	trace pTraceState

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64 // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32

	// Timer heap.
	timers timers

	// Cleanups.
	cleanups       *cleanupBlock
	cleanupsQueued uint64 // monotonic count of cleanups queued by this P

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
	gcStopTime int64

	// goroutinesCreated is the total count of goroutines created by this P.
	goroutinesCreated uint64

	// xRegs is the per-P extended register state used by asynchronous
	// preemption. This is an empty struct on platforms that don't use extended
	// register state.
	xRegs xRegPerP

	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	// that its size class is an integer multiple of the cache line size (for any of our architectures).
}
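// A hedged sketch of the local run queue discipline described at runnext
// above (the real logic, including the retry loop and randomization, is
// runqput in proc.go): a ready'd G preferentially displaces runnext, and the
// displaced G falls back to the tail of the circular runq.
//
//	old := pp.runnext
//	if pp.runnext.cas(old, gp.guintptr()) {
//		if old != 0 {
//			// Kick the previous runnext out to the tail of runq.
//		}
//	}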
type schedt struct {
	goidgen    atomic.Uint64
	lastpoll   atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil  atomic.Int64 // time to which current poll is sleeping
	pollingNet atomic.Int32 // 1 if some P doing non-blocking network poll

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys        atomic.Int32 // number of system goroutines
	nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq gQueue

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  atomic.Bool // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait atomic.Bool
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime

	customGOMAXPROCS bool // GOMAXPROCS was manually set from the environment or runtime.GOMAXPROCS

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64

	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
	// latencies, defined as the time taken by stopTheWorldWithSema to get
	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
	// stwStoppingTimeOther covers the others.
	stwStoppingTimeGC    timeHistogram
	stwStoppingTimeOther timeHistogram

	// stwTotalTimeGC/Other are distributions of stop-the-world total
	// latencies, defined as the total time from stopTheWorldWithSema to
	// startTheWorldWithSema. This is a superset of
	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
	// stwTotalTimeOther covers the others.
	stwTotalTimeGC    timeHistogram
	stwTotalTimeOther timeHistogram

	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
	// allm) is the sum of time goroutines have spent in _Grunnable and with an
	// M, but waiting for locks within the runtime. This field stores the value
	// for Ms that have exited.
	totalRuntimeLockWaitTime atomic.Int64

	// goroutinesCreated (plus the value of goroutinesCreated on each P in allp)
	// is the sum of all goroutines created by the program.
	goroutinesCreated atomic.Uint64
}
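// To make the timeToRun definition concrete (sketch only; the real
// bookkeeping lives in the casgstatus path in proc.go, and only a sampled
// subset of Gs is tracked): the runtime stamps a tracked G when it becomes
// runnable and accumulates the delta when it next starts running.
//
//	// _Gwaiting -> _Grunnable:  gp.trackingStamp = nanotime()
//	// _Grunnable -> _Grunning:  gp.runnableTime += nanotime() - gp.trackingStamp
//	// when tracking ends:       sched.timeToRun.record(gp.runnableTime)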
// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions
	flag      abi.FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary

	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.

	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32

	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}
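// For orientation (sketch only): because _func stores offsets rather than
// pointers, recovering a function's absolute entry PC goes through the
// containing module, roughly as below; the real lookup is textAddr in
// symtab.go, which also handles split text sections.
//
//	entry := md.text + uintptr(f.entryOff) // md is the *moduledata containing f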
// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones      uint32  // set to ^0 to distinguish from _func
	entry     uintptr // entry of the real (the "outermost") frame
	name      string
	file      string
	line      int32
	startLine int32
}

type itab = abi.ITab

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle atomic.Bool
}

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}

// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	arg  any     // argument to panic
	link *_panic // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	repanicked  bool // whether this panic repanicked
	goexit      bool
	deferreturn bool

	gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}

// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	retpc           uintptr
	deferBitsOffset uintptr
	slotsOffset     uintptr
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}
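// As a sketch of how the _defer and _panic lists hang together (illustration,
// not runtime code): each G keeps its innermost defer and panic at the head
// of singly linked lists, so unwinding walks outward via link.
//
//	for d := gp._defer; d != nil; d = d.link {
//		// innermost deferred call first
//	}
//	for p := gp._panic; p != nil; p = p.link {
//		// most recent panic first
//	}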
// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGCIdle                             // "force gc (idle)"
	waitReasonUpdateGOMAXPROCSIdle                    // "GOMAXPROCS updater (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonSelect                                  // "select"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonSyncMutexLock                           // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock                        // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock                         // "sync.RWMutex.Lock"
	waitReasonSyncWaitGroupWait                       // "sync.WaitGroup.Wait"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonGCWorkerActive                          // "GC worker (active)"
	waitReasonPreempted                               // "preempted"
	waitReasonDebugCall                               // "debug call"
	waitReasonGCMarkTermination                       // "GC mark termination"
	waitReasonStoppingTheWorld                        // "stopping the world"
	waitReasonFlushProcCaches                         // "flushing proc caches"
	waitReasonTraceGoroutineStatus                    // "trace goroutine status"
	waitReasonTraceProcStatus                         // "trace proc status"
	waitReasonPageTraceFlush                          // "page trace flush"
	waitReasonCoroutine                               // "coroutine"
	waitReasonGCWeakToStrongWait                      // "GC weak to strong wait"
	waitReasonSynctestRun                             // "synctest.Run"
	waitReasonSynctestWait                            // "synctest.Wait"
	waitReasonSynctestChanReceive                     // "chan receive (durable)"
	waitReasonSynctestChanSend                        // "chan send (durable)"
	waitReasonSynctestSelect                          // "select (durable)"
	waitReasonSynctestWaitGroupWait                   // "sync.WaitGroup.Wait (durable)"
	waitReasonCleanupWait                             // "cleanup wait"
)
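// Note (illustration only): each constant's trailing comment above is its
// expected String() form, so a new reason needs a matching entry in
// waitReasonStrings below, e.g.
//
//	waitReasonSemacquire.String() == "semacquire"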
var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonUpdateGOMAXPROCSIdle:  "GOMAXPROCS updater (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonSyncWaitGroupWait:     "sync.WaitGroup.Wait",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
	waitReasonFlushProcCaches:       "flushing proc caches",
	waitReasonTraceGoroutineStatus:  "trace goroutine status",
	waitReasonTraceProcStatus:       "trace proc status",
	waitReasonPageTraceFlush:        "page trace flush",
	waitReasonCoroutine:             "coroutine",
	waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
	waitReasonSynctestRun:           "synctest.Run",
	waitReasonSynctestWait:          "synctest.Wait",
	waitReasonSynctestChanReceive:   "chan receive (durable)",
	waitReasonSynctestChanSend:      "chan send (durable)",
	waitReasonSynctestSelect:        "select (durable)",
	waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
	waitReasonCleanupWait:           "cleanup wait",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

// isMutexWait returns true if the goroutine is blocked because of
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
//
//go:nosplit
func (w waitReason) isMutexWait() bool {
	return w == waitReasonSyncMutexLock ||
		w == waitReasonSyncRWMutexRLock ||
		w == waitReasonSyncRWMutexLock
}

// isSyncWait returns true if the goroutine is blocked because of
// sync library primitive operations.
//
//go:nosplit
func (w waitReason) isSyncWait() bool {
	return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
}

// isChanWait is true if the goroutine is blocked because of non-nil
// channel operations or a select statement with at least one case.
//
//go:nosplit
func (w waitReason) isChanWait() bool {
	return w == waitReasonSelect ||
		w == waitReasonChanReceive ||
		w == waitReasonChanSend
}

func (w waitReason) isWaitingForSuspendG() bool {
	return isWaitingForSuspendG[w]
}
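// An observation worth making explicit (not from the original comments):
// isSyncWait relies on waitReasonSyncCondWait through
// waitReasonSyncWaitGroupWait being declared contiguously in the const block
// above, so insertions into that range change its meaning. For example:
//
//	waitReasonSyncRWMutexLock.isSyncWait() == true // falls inside the range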
// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
// setting a waitReason because it needs to be able to let the suspendG
// (used by the GC and the execution tracer) take ownership of its stack.
// The G is always actually executing on the system stack in these cases.
//
// TODO(mknyszek): Consider replacing this with a new dedicated G status.
var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
	waitReasonStoppingTheWorld:      true,
	waitReasonGCMarkTermination:     true,
	waitReasonGarbageCollection:     true,
	waitReasonGarbageCollectionScan: true,
	waitReasonTraceGoroutineStatus:  true,
	waitReasonTraceProcStatus:       true,
	waitReasonPageTraceFlush:        true,
	waitReasonGCAssistMarking:       true,
	waitReasonGCWorkerActive:        true,
	waitReasonFlushProcCaches:       true,
}

func (w waitReason) isIdleInSynctest() bool {
	return isIdleInSynctest[w]
}

// isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.
var isIdleInSynctest = [len(waitReasonStrings)]bool{
	waitReasonChanReceiveNilChan:    true,
	waitReasonChanSendNilChan:       true,
	waitReasonSelectNoCases:         true,
	waitReasonSleep:                 true,
	waitReasonSyncCondWait:          true,
	waitReasonSynctestWaitGroupWait: true,
	waitReasonCoroutine:             true,
	waitReasonSynctestRun:           true,
	waitReasonSynctestWait:          true,
	waitReasonSynctestChanReceive:   true,
	waitReasonSynctestChanSend:      true,
	waitReasonSynctestSelect:        true,
}

var (
	// Linked-list of all Ms. Written under sched.lock, read atomically.
	allm *m

	gomaxprocs    int32
	numCPUStartup int32
	forcegc       forcegcstate
	sched         schedt
	newprocs      int32
)

var (
	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	allpLock mutex

	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	allp []*p

	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must set the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	idlepMask pMask

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.
	//
	// Ideally, the timer mask would be kept immediately consistent on any timer
	// operations. Unfortunately, updating a shared global data structure in the
	// timer hot path adds too much overhead in applications frequently switching
	// between no timers and some timers.
	//
	// As a compromise, the timer mask is updated only on pidleget / pidleput. A
	// running P (returned by pidleget) may add a timer at any time, so its mask
	// must be set. An idle P (passed to pidleput) cannot add new timers while
	// idle, so if it has no timers at that time, its mask may be cleared.
	//
	// Thus, we get the following effects on timer-stealing in findrunnable:
	//
	//   - Idle Ps with no timers when they go idle are never checked in findrunnable
	//     (for work- or timer-stealing; this is the ideal case).
	//   - Running Ps must always be checked.
	//   - Idle Ps whose timers are stolen must continue to be checked until they run
	//     again, even after timer expiration.
	//
	// When the P starts running again, the mask should be set, as a timer may be
	// added at any time.
	//
	// TODO(prattmic): Additional targeted updates may improve the above cases.
	// e.g., updating the mask when stealing a timer.
	timerpMask pMask
)
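// A sketch of the mask discipline described above (pMask and pidleput are
// defined in proc.go; pidleput itself performs the mask updates): the idle
// bit and the idle-list update happen together under sched.lock, so a racing
// pidleget never observes one without the other.
//
//	lock(&sched.lock)
//	// inside pidleput: update timerpMask, set idlepMask bit, push pp
//	pidleput(pp, 0)
//	unlock(&sched.lock)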
// goarmsoftfp is used by runtime/cgo assembly.
//
//go:linkname goarmsoftfp

var (
	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
)

// set by cmd/link on arm systems
// accessed using linkname by internal/runtime/atomic.
//
// goarm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/creativeprojects/go-selfupdate
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goarm
var (
	goarm       uint8
	goarmsoftfp uint8
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"

// getcallerfp returns the frame pointer of the caller of the caller
// of this function.
//
//go:nosplit
//go:noinline
func getcallerfp() uintptr {
	fp := getfp() // This frame's FP.
	if fp != 0 {
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's FP.
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's caller's FP.
	}
	return fp
}
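// The two dereferences in getcallerfp work because, on framepointer-enabled
// architectures, each frame's saved frame pointer slot holds the caller's
// frame pointer, forming a linked list up the stack. A generalized walk
// (sketch only; real tracebacks need more care at stack boundaries):
//
//	for fp := getfp(); fp != 0; fp = *(*uintptr)(unsafe.Pointer(fp)) {
//		// each iteration visits one frame
//	}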