Source file src/runtime/runtime2.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/chacha8rand"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M (g.m is valid) and it usually has a P
	// (g.m.p is valid), but there are small windows of time where
	// it might not, namely upon entering and exiting _Gsyscall.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	// It may have a P attached, but it does not own it. Code
	// executing in this state must not touch g.m.p.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gleaked represents a leaked goroutine caught by the GC.
	_Gleaked // 10

	// _Gdeadextra is a _Gdead goroutine that's attached to an extra M
	// used for cgo callbacks.
	_Gdeadextra // 11

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	_Gscanleaked    = _Gscan + _Gleaked    // 0x100a
	_Gscandeadextra = _Gscan + _Gdeadextra // 0x100b
)
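
// For illustration: runtime code does not write g.atomicstatus directly.
// Status transitions go through helpers in proc.go such as
//
//	casgstatus(gp, _Grunning, _Gwaiting)
//
// which waits out any transient _Gscan state before performing the swap.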

const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), or _Pgcstop (to halt for the GC). The M may also hand
	// ownership of the P off directly to another M (for example,
	// to schedule a locked G).
	_Prunning

	// _Psyscall_unused is a now-defunct state for a P. A P is
	// identified as "in a system call" by looking at the goroutine's
	// state.
	_Psyscall_unused

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
// Initialization is helpful for static lock ranking, but not required.
type mutex struct {
	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	lockRankStruct
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *any) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
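
// For illustration: an any value has exactly the layout of an eface, so
//
//	var x any = 42
//	e := efaceOf(&x) // e._type describes int, e.data points at the value
//
// is a pure reinterpretation of the interface header; efaceOf itself
// performs no allocation.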

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
// Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
// so they would need to be updated too if g's start moving.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

//go:nosplit
func (gp *g) guintptr() guintptr {
	return guintptr(unsafe.Pointer(gp))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}
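
// For illustration, fields declared as guintptr elsewhere in this file
// (for example p.runnext and g.schedlink) are accessed through these helpers:
//
//	pp.runnext.set(gp)       // store a *g without a write barrier
//	next := pp.runnext.ptr() // read it back as a *g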

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
//  1. Never hold an muintptr locally across a safe point.
//
//  2. Any muintptr in the heap must be owned by the M itself so it can
//     ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	lr   uintptr
	bp   uintptr // for framepointer-enabled architectures
}

// maybeTraceablePtr is a special pointer that is conditionally trackable
// by the GC. It consists of an address as a uintptr (vu) and a pointer
// to a data element (vp).
//
// maybeTraceablePtr values can be in one of three states:
//  1. Unset: vu == 0 && vp == nil
//  2. Untracked: vu != 0 && vp == nil
//  3. Tracked: vu != 0 && vp != nil
//
// Do not set fields manually. Use methods instead.
// Extend this type with additional methods if needed.
type maybeTraceablePtr struct {
	vp unsafe.Pointer // For liveness only.
	vu uintptr        // Source of truth.
}

// setUntraceable unsets the pointer but preserves the address.
// This is used to hide the pointer from the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setUntraceable() {
	p.vp = nil
}

// setTraceable resets the pointer to the stored address.
// This is used to make the pointer visible to the GC.
//
//go:nosplit
func (p *maybeTraceablePtr) setTraceable() {
	p.vp = unsafe.Pointer(p.vu)
}

// set sets the pointer to the data element and updates the address.
//
//go:nosplit
func (p *maybeTraceablePtr) set(v unsafe.Pointer) {
	p.vp = v
	p.vu = uintptr(v)
}

// get retrieves the pointer to the data element.
//
//go:nosplit
func (p *maybeTraceablePtr) get() unsafe.Pointer {
	return unsafe.Pointer(p.vu)
}

// uintptr returns the uintptr address of the pointer.
//
//go:nosplit
func (p *maybeTraceablePtr) uintptr() uintptr {
	return p.vu
}

// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr)
// to track hchan pointers.
//
// Do not set fields manually. Use methods instead.
type maybeTraceableChan struct {
	maybeTraceablePtr
}

//go:nosplit
func (p *maybeTraceableChan) set(c *hchan) {
	p.maybeTraceablePtr.set(unsafe.Pointer(c))
}

//go:nosplit
func (p *maybeTraceableChan) get() *hchan {
	return (*hchan)(p.maybeTraceablePtr.get())
}
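
// For illustration, the three maybeTraceablePtr states map onto the methods
// like so:
//
//	var p maybeTraceablePtr
//	p.set(unsafe.Pointer(c)) // tracked: vu != 0, vp != nil
//	p.setUntraceable()       // untracked: vu != 0, vp == nil (hidden from the GC)
//	p.setTraceable()         // tracked again; vp is restored from vu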

// sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	next *sudog
	prev *sudog

	elem maybeTraceablePtr // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool

	// success indicates whether communication over channel c
	// succeeded. It is true if the goroutine was awoken because a
	// value was delivered over channel c, and false if awoken
	// because c was closed.
	success bool

	// waiters is a count of the semaRoot waiting list, excluding the head of
	// the list, clamped to a uint16 to fit in unused space.
	// Only meaningful at the head of the list.
	// (If we wanted to be overly clever, we could store a high 16 bits
	// in the second entry in the list.)
	waiters uint16

	parent   *sudog // semaRoot binary tree
	waitlink *sudog // g.waiting list or semaRoot
	waittail *sudog // semaRoot
	c        maybeTraceableChan // channel
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// heldLockInfo gives info on a held lock and the rank of that lock.
type heldLockInfo struct {
	lockAddr uintptr
	rank     lockRank
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the //go:systemstack stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic    *_panic // innermost panic - offset known to liblink
	_defer    *_defer // innermost defer
	m         *m      // current m; offset known to arm liblink
	sched     gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	syscallbp uintptr // if status==Gsyscall, syscallbp = sched.bp to use in fpTraceback
	stktopsp  uintptr // expected sp at top of stack, to check in traceback
	// param is a generic pointer parameter field used to pass
	// values in particular contexts where other storage for the
	// parameter would be difficult to find. It is currently used
	// in four ways:
	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	//    point to the sudog of the completed blocking operation.
	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	//    the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	//    stack may have moved in the meantime.
	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	//    closure in the runtime is forbidden.
	// 4. When a panic is recovered and control returns to the respective frame,
	//    param may point to a savedOpenDeferState.
	param        unsafe.Pointer
	atomicstatus atomic.Uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         uint64
	schedlink    guintptr
	waitsince    int64      // approx time when the g became blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool
	// parkingOnChan indicates that the goroutine is about to
	// park on a chansend or chanrecv. Used to signal an unsafe point
	// for stack shrinking.
	parkingOnChan atomic.Bool
	// inMarkAssist indicates whether the goroutine is in mark assist.
	// Used by the execution tracer.
	inMarkAssist bool
	coroexit     bool // argument to coroswitch_m

	raceignore    int8  // ignore race detection events
	nocgocallback bool  // whether to disable callbacks from C
	tracking      bool  // whether we're tracking this G for sched latency statistics
	trackingSeq   uint8 // used to decide whether to track this G
	trackingStamp int64 // timestamp of when the G last started being tracked
	runnableTime  int64 // the amount of time spent runnable, cleared when running, only used when tracking
	lockedm       muintptr
	fipsIndicator uint8
	syncSafePoint bool // set if g is stopped at a synchronous safe point.
	runningCleanups atomic.Bool
	sig             uint32
	writebuf        []byte
	sigcode0        uintptr
	sigcode1        uintptr
	sigpc           uintptr
	parentGoid      uint64          // goid of goroutine that created this goroutine
	gopc            uintptr         // pc of go statement that created this goroutine
	ancestors       *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc         uintptr         // pc of goroutine function
	racectx         uintptr
	waiting         *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt         []uintptr      // cgo traceback context
	labels          unsafe.Pointer // profiler labels
	timer           *timer         // cached timer for time.Sleep
	sleepWhen       int64          // when to sleep until
	selectDone      atomic.Uint32  // are we participating in a select and did someone win the race?

	// goroutineProfiled indicates the status of this goroutine's stack for the
	// current in-progress goroutine profile
	goroutineProfiled goroutineProfileStateHolder

	coroarg *coro // argument during coroutine transfers
	bubble  *synctestBubble

	// xRegs stores the extended register state if this G has been
	// asynchronously preempted.
	xRegs xRegPerG

	// Per-G tracer state.
	trace gTraceState

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// valgrindStackID is used to track what memory is used for stacks when a program is
	// built with the "valgrind" build tag, otherwise it is unused.
	valgrindStackID uintptr
}

// gTrackingPeriod is the number of transitions out of _Grunning between
// latency tracking runs.
const gTrackingPeriod = 8

const (
	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	// like Windows.
	tlsSlots = 6
	tlsSize  = tlsSlots * goarch.PtrSize
)

// Values for m.freeWait.
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink (cmd/internal/obj/arm/obj5.go)

	// Fields whose offsets are not known to debuggers.

	procid     uint64            // for debuggers, but offset not hard-coded
	gsignal    *g                // signal-handling g
	goSigStack gsignalStack      // Go-allocated signal handling stack
	sigmask    sigset            // storage for saved signal mask
	tls        [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	mstartfn   func()
	curg       *g       // current running goroutine
	caughtsig  guintptr // goroutine running during fatal signal

	// p is the currently attached P for executing Go code, nil if not executing user Go code.
	//
	// A non-nil p implies exclusive ownership of the P, unless curg is in _Gsyscall.
	// In _Gsyscall the scheduler may mutate this instead. The point of synchronization
	// is the _Gscan bit on curg's status. The scheduler must arrange to prevent curg
	// from transitioning out of _Gsyscall if it intends to mutate p.
	p puintptr

	nextp      puintptr // The next P to install before executing. Implies exclusive ownership of this P.
	oldp       puintptr // The P that was attached before executing a syscall.
	id         int64
	mallocing  int32
	throwing   throwType
	preemptoff string // if != "", keep curg running on this m
	locks      int32
	dying      int32
	profilehz  int32
	spinning   bool // m is out of work and is actively looking for work
	blocked    bool // m is blocked on a note
	newSigstack     bool // minit on C thread called sigaltstack
	printlock       int8
	incgo           bool // m is executing a cgo call
	isextra         bool // m is an extra m
	isExtraInC      bool // m is an extra m that does not have any Go frames
	isExtraInSig    bool // m is an extra m in a signal handler
	freeWait        atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
	needextram      bool
	g0StackAccurate bool // whether the g0 stack has accurate bounds
	traceback       uint8
	allpSnapshot    []*p          // Snapshot of allp for use after dropping P in findRunnable, nil otherwise.
	ncgocall        uint64        // number of cgo calls in total
	ncgo            int32         // number of cgo calls currently in progress
	cgoCallersUse   atomic.Uint32 // if non-zero, cgoCallers in use temporarily
	cgoCallers      *cgoCallers   // cgo traceback if crashing in cgo call
	park            note
	alllink         *m // on allm
	schedlink       muintptr
	idleNode        listNodeManual
	lockedg         guintptr
	createstack     [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
	lockedExt       uint32      // tracking for external LockOSThread
	lockedInt       uint32      // tracking for internal lockOSThread
	mWaitList       mWaitList   // list of runtime lock waiters

	mLockProfile mLockProfile // fields relating to runtime.lock contention
	profStack    []uintptr    // used for memory/block/mutex stack traces

	// wait* are used to carry arguments from gopark into park_m, because
	// there's no stack to put them on. That is their sole purpose.
	waitunlockf          func(*g, unsafe.Pointer) bool
	waitlock             unsafe.Pointer
	waitTraceSkip        int
	waitTraceBlockReason traceBlockReason

	syscalltick uint32
	freelink    *m // on sched.freem
	trace       mTraceState

	// These are here to avoid using the G stack so the stack can move during the call.
	libcallpc  uintptr // for cpu profiler
	libcallsp  uintptr
	libcallg   guintptr
	winsyscall winlibcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails.
	preemptGen atomic.Uint32

	// Whether this is a pending preemption signal on this M.
	signalPending atomic.Uint32

	// pcvalue lookup cache
	pcvalueCache pcvalueCache

	dlogPerM

	mOS

	chacha8   chacha8rand.State
	cheaprand uint64

	// Up to 10 locks held by this m, maintained by the lock ranking code.
	locksHeldLen int
	locksHeld    [10]heldLockInfo

	// self points to this M until mexit clears it, after which get returns nil.
	self mWeakPointer
}

const mRedZoneSize = (16 << 3) * asanenabledBit // redZoneSize(2048)

type mPadded struct {
	m

	// Size the runtime.m structure so it fits in the 2048-byte size class, and
	// not in the next-smallest (1792-byte) size class. That leaves the 11 low
	// bits of muintptr values available for flags, as required by
	// lock_spinbit.go.
	_ [(1 - goarch.IsWasm) * (2048 - mallocHeaderSize - mRedZoneSize - unsafe.Sizeof(m{}))]byte
}

// mWeakPointer is a "weak" pointer to an M. A weak pointer for each M is
// available as m.self. Users may copy mWeakPointer arbitrarily, and get will
// return the M if it is still live, or nil after mexit.
//
// The zero value is treated as a nil pointer.
//
// Note that get may race with M exit. A successful get will keep the m object
// alive, but the M itself may be exited and thus not actually usable.
type mWeakPointer struct {
	m *atomic.Pointer[m]
}

func newMWeakPointer(mp *m) mWeakPointer {
	w := mWeakPointer{m: new(atomic.Pointer[m])}
	w.m.Store(mp)
	return w
}

func (w mWeakPointer) get() *m {
	if w.m == nil {
		return nil
	}
	return w.m.Load()
}

// clear sets the weak pointer to nil. It cannot be used on zero value
// mWeakPointers.
func (w mWeakPointer) clear() {
	w.m.Store(nil)
}
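
// For illustration, p.oldm below holds an mWeakPointer copied from m.self,
// so later code can do
//
//	if mp := pp.oldm.get(); mp != nil {
//		// mp is still live (though it may be idle, running, or exiting)
//	}
//
// and get simply returns nil once mexit has cleared the M's self pointer.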

type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	// oldm is the previous m this p ran on.
	//
	// We are not associated with this m, so we have no control over its
	// lifecycle. This value is an m.self object which points to the m
	// until the m exits.
	//
	// Note that this m may be idle, running, or exiting. It should only be
	// used with mgetSpecific, which will take ownership of the m only if
	// it is idle.
	oldm mWeakPointer

	deferpool    []*_defer // pool of available defer structs (see panic.go)
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	//
	// Note that while other P's may atomically CAS this to zero,
	// only the owner P can CAS it to a valid G.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree gList

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	// Cache of a single pinner object to reduce allocations from repeated
	// pinner creation.
	pinnerCache *pinner

	trace pTraceState

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime         int64        // Nanoseconds in assistAlloc
	gcFractionalMarkTime atomic.Int64 // Nanoseconds in fractional mark worker

	// limiterEvent tracks events for the GC CPU limiter.
	limiterEvent limiterEvent

	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	// That is, this is used to communicate with the worker goroutine
	// selected for immediate execution by
	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	// this field must be set to gcMarkWorkerNotWorker.
	gcMarkWorkerMode gcMarkWorkerMode
	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	// mark worker started.
	gcMarkWorkerStartTime int64

	// nextGCMarkWorker is the next mark worker to run. This may be set
	// during start-the-world to assign a worker to this P. The P runs this
	// worker on the next call to gcController.findRunnableGCWorker. If the
	// P runs something else or stops, it must release this worker via
	// gcController.releaseNextGCMarkWorker.
	//
	// See comment in gcBgMarkWorker about the lifetime of
	// gcBgMarkWorkerNode.
	//
	// Only accessed by this P or during STW.
	nextGCMarkWorker *gcBgMarkWorkerNode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// statsSeq is a counter indicating whether this P is currently
	// writing any stats. Its value is even when not, odd when it is.
	statsSeq atomic.Uint32

	// Timer heap.
	timers timers

	// Cleanups.
	cleanups       *cleanupBlock
	cleanupsQueued uint64 // monotonic count of cleanups queued by this P

	// maxStackScanDelta accumulates the amount of stack space held by
	// live goroutines (i.e. those eligible for stack scanning).
	// Flushed to gcController.maxStackScan once maxStackScanSlack
	// or -maxStackScanSlack is reached.
	maxStackScanDelta int64

	// gc-time statistics about current goroutines
	// Note that this differs from maxStackScan in that this
	// accumulates the actual stack observed to be used at GC time (hi - sp),
	// not an instantaneous measure of the total stack size that might need
	// to be scanned (hi - lo).
	scannedStackSize uint64 // stack size of goroutines scanned by this P
	scannedStacks    uint64 // number of goroutines scanned by this P

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
	gcStopTime int64

	// goroutinesCreated is the total count of goroutines created by this P.
	goroutinesCreated uint64

	// xRegs is the per-P extended register state used by asynchronous
	// preemption. This is an empty struct on platforms that don't use extended
	// register state.
	xRegs xRegPerP

	// Padding is no longer needed. False sharing is now not a worry because p
	// is large enough that its size class is an integer multiple of the cache
	// line size (for any of our architectures).
}

type schedt struct {
	goidgen    atomic.Uint64
	lastpoll   atomic.Int64 // time of last network poll, 0 if currently polling
	pollUntil  atomic.Int64 // time to which current poll is sleeping
	pollingNet atomic.Int32 // 1 if some P doing non-blocking network poll

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        listHeadManual // idle m's waiting for work
	nmidle       int32          // number of idle m's waiting for work
	nmidlelocked int32          // number of locked m's waiting for work
	mnext        int64          // number of m's that have been created and next M ID
	maxmcount    int32          // maximum number of m's allowed (or die)
	nmsys        int32          // number of system m's not counted for deadlock
	nmfreed      int64          // cumulative number of freed m's

	ngsys        atomic.Int32 // number of system goroutines
	nGsyscallNoP atomic.Int32 // number of goroutines in syscalls without a P

	pidle        puintptr // idle p's
	npidle       atomic.Int32
	nmspinning   atomic.Int32  // See "Worker thread parking/unparking" comment in proc.go.
	needspinning atomic.Uint32 // See "Delicate dance" comment in proc.go. Boolean. Must hold sched.lock to set to 1.

	// Global runnable queue.
	runq gQueue

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  atomic.Bool // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait atomic.Bool
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime

	customGOMAXPROCS bool // GOMAXPROCS was manually set from the environment or runtime.GOMAXPROCS

	// sysmonlock protects sysmon's actions on the runtime.
	//
	// Acquire and hold this mutex to block sysmon from interacting
	// with the rest of the runtime.
	sysmonlock mutex

	// timeToRun is a distribution of scheduling latencies, defined
	// as the sum of time a G spends in the _Grunnable state before
	// it transitions to _Grunning.
	timeToRun timeHistogram

	// idleTime is the total CPU time Ps have "spent" idle.
	//
	// Reset on each GC cycle.
	idleTime atomic.Int64

	// totalMutexWaitTime is the sum of time goroutines have spent in _Gwaiting
	// with a waitreason of the form waitReasonSync{RW,}Mutex{R,}Lock.
	totalMutexWaitTime atomic.Int64

	// stwStoppingTimeGC/Other are distributions of stop-the-world stopping
	// latencies, defined as the time taken by stopTheWorldWithSema to get
	// all Ps to stop. stwStoppingTimeGC covers all GC-related STWs,
	// stwStoppingTimeOther covers the others.
	stwStoppingTimeGC    timeHistogram
	stwStoppingTimeOther timeHistogram

	// stwTotalTimeGC/Other are distributions of stop-the-world total
	// latencies, defined as the total time from stopTheWorldWithSema to
	// startTheWorldWithSema. This is a superset of
	// stwStoppingTimeGC/Other. stwTotalTimeGC covers all GC-related STWs,
	// stwTotalTimeOther covers the others.
	stwTotalTimeGC    timeHistogram
	stwTotalTimeOther timeHistogram

	// totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in
	// allm) is the sum of time goroutines have spent in _Grunnable and with an
	// M, but waiting for locks within the runtime. This field stores the value
	// for Ms that have exited.
	totalRuntimeLockWaitTime atomic.Int64

	// goroutinesCreated (plus the value of goroutinesCreated on each P in allp)
	// is the sum of all goroutines created by the program.
	goroutinesCreated atomic.Uint64
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify  = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                // if signal.Notify doesn't take it, exit quietly
	_SigThrow               // if signal.Notify doesn't take it, exit loudly
	_SigPanic               // if the signal is from the kernel, panic
	_SigDefault             // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit              // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
	_SigUnblock  // always unblock; see blockableSig
	_SigIgn      // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker.
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	sys.NotInHeap // Only in static data

	entryOff uint32 // start pc, as offset from moduledata.text
	nameOff  int32  // function name, as index into moduledata.funcnametab.

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      uint32
	pcfile    uint32
	pcln      uint32
	npcdata   uint32
	cuOffset  uint32     // runtime.cutab offset of this function's CU
	startLine int32      // line number of start of function (func keyword/TEXT directive)
	funcID    abi.FuncID // set for certain special runtime functions
	flag      abi.FuncFlag
	_         [1]byte // pad
	nfuncdata uint8   // must be last, must end on a uint32-aligned boundary

	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.

	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32

	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
//
// TODO(austin): Can we merge this with inlinedCall?
type funcinl struct {
	ones      uint32  // set to ^0 to distinguish from _func
	entry     uintptr // entry of the real (the "outermost") frame
	name      string
	file      string
	line      int32
	startLine int32
}

type itab = abi.ITab

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle atomic.Bool
}
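
// For illustration: a statement such as
//
//	defer f()
//
// compiles into code that obtains a _defer record (stack-allocated when
// possible), fills in sp, pc, and fn, and pushes it onto the front of the
// current goroutine's g._defer list; deferreturn and gopanic later walk
// and pop that list.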

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in deferProcStack.
// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	heap      bool
	rangefunc bool    // true for rangefunc list
	sp        uintptr // sp at time of defer
	pc        uintptr // pc at time of defer
	fn        func()  // can be nil for open-coded defers
	link      *_defer // next defer on G; can point to either heap or stack!

	// If rangefunc is true, *head is the head of the atomic linked list
	// during a range-over-func execution.
	head *atomic.Pointer[_defer]
}

// A _panic holds information about an active panic.
//
// A _panic value must only ever live on the stack.
//
// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
	arg  any     // argument to panic
	link *_panic // link to earlier panic

	// startPC and startSP track where _panic.start was called.
	startPC uintptr
	startSP unsafe.Pointer

	// The current stack frame that we're running deferred calls for.
	sp unsafe.Pointer
	lr uintptr
	fp unsafe.Pointer

	// retpc stores the PC where the panic should jump back to, if the
	// function last returned by _panic.next() recovers the panic.
	retpc uintptr

	// Extra state for handling open-coded defers.
	deferBitsPtr *uint8
	slotsPtr     unsafe.Pointer

	recovered   bool // whether this panic has been recovered
	repanicked  bool // whether this panic repanicked
	goexit      bool
	deferreturn bool

	gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}

// savedOpenDeferState tracks the extra state from _panic that's
// necessary for deferreturn to pick up where gopanic left off,
// without needing to unwind the stack.
type savedOpenDeferState struct {
	retpc           uintptr
	deferBitsOffset uintptr
	slotsOffset     uintptr
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid uint64    // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGCIdle                             // "force gc (idle)"
	waitReasonUpdateGOMAXPROCSIdle                    // "GOMAXPROCS updater (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonSelect                                  // "select"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonSyncMutexLock                           // "sync.Mutex.Lock"
	waitReasonSyncRWMutexRLock                        // "sync.RWMutex.RLock"
	waitReasonSyncRWMutexLock                         // "sync.RWMutex.Lock"
	waitReasonSyncWaitGroupWait                       // "sync.WaitGroup.Wait"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonGCWorkerActive                          // "GC worker (active)"
	waitReasonPreempted                               // "preempted"
	waitReasonDebugCall                               // "debug call"
	waitReasonGCMarkTermination                       // "GC mark termination"
	waitReasonStoppingTheWorld                        // "stopping the world"
	waitReasonFlushProcCaches                         // "flushing proc caches"
	waitReasonTraceGoroutineStatus                    // "trace goroutine status"
	waitReasonTraceProcStatus                         // "trace proc status"
	waitReasonPageTraceFlush                          // "page trace flush"
	waitReasonCoroutine                               // "coroutine"
	waitReasonGCWeakToStrongWait                      // "GC weak to strong wait"
	waitReasonSynctestRun                             // "synctest.Run"
	waitReasonSynctestWait                            // "synctest.Wait"
	waitReasonSynctestChanReceive                     // "chan receive (durable)"
	waitReasonSynctestChanSend                        // "chan send (durable)"
	waitReasonSynctestSelect                          // "select (durable)"
	waitReasonSynctestWaitGroupWait                   // "sync.WaitGroup.Wait (durable)"
	waitReasonCleanupWait                             // "cleanup wait"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGCIdle:           "force gc (idle)",
	waitReasonUpdateGOMAXPROCSIdle:  "GOMAXPROCS updater (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonSyncMutexLock:         "sync.Mutex.Lock",
	waitReasonSyncRWMutexRLock:      "sync.RWMutex.RLock",
	waitReasonSyncRWMutexLock:       "sync.RWMutex.Lock",
	waitReasonSyncWaitGroupWait:     "sync.WaitGroup.Wait",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonGCWorkerActive:        "GC worker (active)",
	waitReasonPreempted:             "preempted",
	waitReasonDebugCall:             "debug call",
	waitReasonGCMarkTermination:     "GC mark termination",
	waitReasonStoppingTheWorld:      "stopping the world",
	waitReasonFlushProcCaches:       "flushing proc caches",
	waitReasonTraceGoroutineStatus:  "trace goroutine status",
	waitReasonTraceProcStatus:       "trace proc status",
	waitReasonPageTraceFlush:        "page trace flush",
	waitReasonCoroutine:             "coroutine",
	waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
	waitReasonSynctestRun:           "synctest.Run",
	waitReasonSynctestWait:          "synctest.Wait",
	waitReasonSynctestChanReceive:   "chan receive (durable)",
	waitReasonSynctestChanSend:      "chan send (durable)",
	waitReasonSynctestSelect:        "select (durable)",
	waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
	waitReasonCleanupWait:           "cleanup wait",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

// isMutexWait returns true if the goroutine is blocked because of
// sync.Mutex.Lock or sync.RWMutex.[R]Lock.
//
//go:nosplit
func (w waitReason) isMutexWait() bool {
	return w == waitReasonSyncMutexLock ||
		w == waitReasonSyncRWMutexRLock ||
		w == waitReasonSyncRWMutexLock
}

// isSyncWait returns true if the goroutine is blocked because of
// sync library primitive operations.
//
//go:nosplit
func (w waitReason) isSyncWait() bool {
	return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait
}

// isChanWait is true if the goroutine is blocked because of non-nil
// channel operations or a select statement with at least one case.
//
//go:nosplit
func (w waitReason) isChanWait() bool {
	return w == waitReasonSelect ||
		w == waitReasonChanReceive ||
		w == waitReasonChanSend
}

func (w waitReason) isWaitingForSuspendG() bool {
	return isWaitingForSuspendG[w]
}
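
// For illustration: blocking paths in the runtime record a waitReason when
// they park, typically via something like
//
//	gopark(unlockf, lock, waitReasonChanReceive, traceBlockChanRecv, 2)
//
// where unlockf and lock stand in for the caller's unlock callback and lock
// (see gopark in proc.go). The reason is what appears in goroutine dumps and
// in the execution tracer.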

// isWaitingForSuspendG indicates that a goroutine is only entering _Gwaiting and
// setting a waitReason because it needs to be able to let the suspendG
// (used by the GC and the execution tracer) take ownership of its stack.
// The G is always actually executing on the system stack in these cases.
//
// TODO(mknyszek): Consider replacing this with a new dedicated G status.
var isWaitingForSuspendG = [len(waitReasonStrings)]bool{
	waitReasonStoppingTheWorld:      true,
	waitReasonGCMarkTermination:     true,
	waitReasonGarbageCollection:     true,
	waitReasonGarbageCollectionScan: true,
	waitReasonTraceGoroutineStatus:  true,
	waitReasonTraceProcStatus:       true,
	waitReasonPageTraceFlush:        true,
	waitReasonGCAssistMarking:       true,
	waitReasonGCWorkerActive:        true,
	waitReasonFlushProcCaches:       true,
}

func (w waitReason) isIdleInSynctest() bool {
	return isIdleInSynctest[w]
}

// isIdleInSynctest indicates that a goroutine is considered idle by synctest.Wait.
var isIdleInSynctest = [len(waitReasonStrings)]bool{
	waitReasonChanReceiveNilChan:    true,
	waitReasonChanSendNilChan:       true,
	waitReasonSelectNoCases:         true,
	waitReasonSleep:                 true,
	waitReasonSyncCondWait:          true,
	waitReasonSynctestWaitGroupWait: true,
	waitReasonCoroutine:             true,
	waitReasonSynctestRun:           true,
	waitReasonSynctestWait:          true,
	waitReasonSynctestChanReceive:   true,
	waitReasonSynctestChanSend:      true,
	waitReasonSynctestSelect:        true,
}

var (
	// Linked-list of all Ms. Written under sched.lock, read atomically.
	allm *m

	gomaxprocs    int32
	numCPUStartup int32
	forcegc       forcegcstate
	sched         schedt
	newprocs      int32
)

var (
	// allpLock protects P-less reads and size changes of allp, idlepMask,
	// and timerpMask, and all writes to allp.
	allpLock mutex

	// len(allp) == gomaxprocs; may change at safe points, otherwise
	// immutable.
	allp []*p

	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	// be atomic. Length may change at safe points.
	//
	// Each P must update only its own bit. In order to maintain
	// consistency, a P going idle must set the idle mask simultaneously with
	// updates to the idle P list under the sched.lock, otherwise a racing
	// pidleget may clear the mask before pidleput sets the mask,
	// corrupting the bitmap.
	//
	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	idlepMask pMask

	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	// must be atomic. Length may change at safe points.
	//
	// Ideally, the timer mask would be kept immediately consistent on any timer
	// operations. Unfortunately, updating a shared global data structure in the
	// timer hot path adds too much overhead in applications frequently switching
	// between no timers and some timers.
	//
	// As a compromise, the timer mask is updated only on pidleget / pidleput. A
	// running P (returned by pidleget) may add a timer at any time, so its mask
	// must be set. An idle P (passed to pidleput) cannot add new timers while
	// idle, so if it has no timers at that time, its mask may be cleared.
	//
	// Thus, we get the following effects on timer-stealing in findRunnable:
	//
	//   - Idle Ps with no timers when they go idle are never checked in findRunnable
	//     (for work- or timer-stealing; this is the ideal case).
	//   - Running Ps must always be checked.
	//   - Idle Ps whose timers are stolen must continue to be checked until they run
	//     again, even after timer expiration.
	//
	// When the P starts running again, the mask should be set, as a timer may be
	// added at any time.
	//
	// TODO(prattmic): Additional targeted updates may improve the above cases.
	// e.g., updating the mask when stealing a timer.
	timerpMask pMask
)

// goarmsoftfp is used by runtime/cgo assembly.
//
//go:linkname goarmsoftfp

var (
	// Pool of GC parked background workers. Entries are type
	// *gcBgMarkWorkerNode.
	gcBgMarkWorkerPool lfstack

	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	gcBgMarkWorkerCount int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
)

// set by cmd/link on arm systems
// accessed using linkname by internal/runtime/atomic.
//
// goarm should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/creativeprojects/go-selfupdate
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname goarm
var (
	goarm       uint8
	goarmsoftfp uint8
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

// Must agree with internal/buildcfg.FramePointerEnabled.
const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"

// getcallerfp returns the frame pointer of the caller of the caller
// of this function.
//
//go:nosplit
//go:noinline
func getcallerfp() uintptr {
	fp := getfp() // This frame's FP.
	if fp != 0 {
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's FP.
		fp = *(*uintptr)(unsafe.Pointer(fp)) // The caller's caller's FP.
	}
	return fp
}
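
// For illustration: if f calls g and g calls getcallerfp, the two
// dereferences above walk the saved-frame-pointer chain so that g receives
// f's frame pointer (or 0 on platforms where frame pointers are
// unavailable).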