Source file src/runtime/mprof.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc profiling.
// Patterned after tcmalloc's algorithms; shorter code.

package runtime

import (
	"internal/abi"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// NOTE(rsc): Everything here could use cas if contention became an issue.
var (
	// profInsertLock protects changes to the start of all *bucket linked lists
	profInsertLock mutex
	// profBlockLock protects the contents of every blockRecord struct
	profBlockLock mutex
	// profMemActiveLock protects the active field of every memRecord struct
	profMemActiveLock mutex
	// profMemFutureLock is a set of locks that protect the respective elements
	// of the future array of every memRecord struct
	profMemFutureLock [len(memRecord{}.future)]mutex
)

// All memory allocations are local and do not escape outside of the profiler.
// The profiler is forbidden from referring to garbage-collected memory.

const (
	// profile types
	memProfile bucketType = 1 + iota
	blockProfile
	mutexProfile

	// size of bucket hash table
	buckHashSize = 179999

	// maxStack is the max depth of stack to record in bucket.
	// Note that it's only used internally as a guard against
	// wildly out-of-bounds slicing of the PCs that come after
	// a bucket struct, and it could increase in the future.
	// The "+ 1" is to account for the first stack entry being
	// taken up by a "skip" sentinel value for profilers which
	// defer inline frame expansion until the profile is reported.
	maxStack = 32 + 1
)

type bucketType int

// A bucket holds per-call-stack profiling information.
// The representation is a bit sleazy, inherited from C.
// This struct defines the bucket header. It is followed in
// memory by the stack words and then the actual record
// data, either a memRecord or a blockRecord.
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
//
// None of the fields in this bucket header are modified after
// creation, including its next and allnext links.
//
// No heap pointers.
type bucket struct {
	_       sys.NotInHeap
	next    *bucket
	allnext *bucket
	typ     bucketType // memBucket or blockBucket (includes mutexProfile)
	hash    uintptr
	size    uintptr
	nstk    uintptr
}

// A memRecord is the bucket data for a bucket of type memProfile,
// part of the memory profile.
type memRecord struct {
	// The following complex 3-stage scheme of stats accumulation
	// is required to obtain a consistent picture of mallocs and frees
	// for some point in time.
	// The problem is that mallocs come in real time, while frees
	// come only after a GC during concurrent sweeping. So if we counted
	// them naively, we would get a skew toward mallocs.
	//
	// Hence, we delay information to get consistent snapshots as
	// of mark termination.
	// Allocations count toward the next mark termination's snapshot,
	// while sweep frees count toward the previous mark termination's
	// snapshot:
	//
	//       MT          MT          MT          MT
	//      .·|         .·|         .·|         .·|
	//    .·˙  |      .·˙  |      .·˙  |      .·˙  |
	//  .·˙    |    .·˙    |    .·˙    |    .·˙    |
	// ·˙      |.·˙        |.·˙        |.·˙        |
	//
	//       alloc → ▲ ← free
	//               ┠┅┅┅┅┅┅┅┅┅┅┅P
	//       C+2      →    C+1    →  C
	//
	//                   alloc → ▲ ← free
	//                           ┠┅┅┅┅┅┅┅┅┅┅┅P
	//                 C+2      →    C+1    →  C
	//
	// Since we can't publish a consistent snapshot until all of
	// the sweep frees are accounted for, we wait until the next
	// mark termination ("MT" above) to publish the previous mark
	// termination's snapshot ("P" above). To do this, allocation
	// and free events are accounted to *future* heap profile
	// cycles ("C+n" above) and we only publish a cycle once all
	// of the events from that cycle are guaranteed to be done.
	// Specifically:
	//
	// Mallocs are accounted to cycle C+2.
	// Explicit frees are accounted to cycle C+2.
	// GC frees (done during sweeping) are accounted to cycle C+1.
	//
	// After mark termination, we increment the global heap
	// profile cycle counter and accumulate the stats from cycle C
	// into the active profile.

	// active is the currently published profile. A profiling
	// cycle can be accumulated into active once it's complete.
	active memRecordCycle

	// future records the profile events we're counting for cycles
	// that have not yet been published. This is a ring buffer
	// indexed by the global heap profile cycle C and stores
	// cycles C, C+1, and C+2. Unlike active, these counts are
	// only for a single cycle; they are not cumulative across
	// cycles.
	//
	// We store cycle C here because there's a window between when
	// C becomes the active cycle and when we've flushed it to
	// active.
	future [3]memRecordCycle
}

// memRecordCycle
type memRecordCycle struct {
	allocs, frees           uintptr
	alloc_bytes, free_bytes uintptr
}

// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
	a.allocs += b.allocs
	a.frees += b.frees
	a.alloc_bytes += b.alloc_bytes
	a.free_bytes += b.free_bytes
}

// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
	count  float64
	cycles int64
}

var (
	mbuckets atomic.UnsafePointer // *bucket, memory profile buckets
	bbuckets atomic.UnsafePointer // *bucket, blocking profile buckets
	xbuckets atomic.UnsafePointer // *bucket, mutex profile buckets
	buckhash atomic.UnsafePointer // *buckhashArray

	mProfCycle mProfCycleHolder
)

type buckhashArray [buckHashSize]atomic.UnsafePointer // *bucket

const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)

// mProfCycleHolder holds the global heap profile cycle number (wrapped at
// mProfCycleWrap, stored starting at bit 1), and a flag (stored at bit 0) to
// indicate whether future[cycle] in all buckets has been queued to flush into
// the active profile.
type mProfCycleHolder struct {
	value atomic.Uint32
}

// read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32) {
	v := c.value.Load()
	cycle = v >> 1
	return cycle
}

// setFlushed sets the flushed flag.
// It returns the current cycle count and the previous value of the
// flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
	for {
		prev := c.value.Load()
		cycle = prev >> 1
		alreadyFlushed = (prev & 0x1) != 0
		next := prev | 0x1
		if c.value.CompareAndSwap(prev, next) {
			return cycle, alreadyFlushed
		}
	}
}

// increment increases the cycle count by one, wrapping the value at
// mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment() {
	// We explicitly wrap mProfCycle rather than depending on
	// uint wraparound because the memRecord.future ring does not
	// itself wrap at a power of two.
	for {
		prev := c.value.Load()
		cycle := prev >> 1
		cycle = (cycle + 1) % mProfCycleWrap
		next := cycle << 1
		if c.value.CompareAndSwap(prev, next) {
			break
		}
	}
}

// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
	switch typ {
	default:
		throw("invalid profile bucket type")
	case memProfile:
		size += unsafe.Sizeof(memRecord{})
	case blockProfile, mutexProfile:
		size += unsafe.Sizeof(blockRecord{})
	}

	b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
	b.typ = typ
	b.nstk = uintptr(nstk)
	return b
}

// stk returns the slice in b holding the stack.
func (b *bucket) stk() []uintptr {
	stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
	if b.nstk > maxStack {
		// prove that slicing works; otherwise a failure requires a P
		throw("bad profile stack count")
	}
	return stk[:b.nstk:b.nstk]
}

// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}

// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile && b.typ != mutexProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}

// Return the bucket for stk[0:nstk], allocating new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
	bh := (*buckhashArray)(buckhash.Load())
	if bh == nil {
		lock(&profInsertLock)
		// check again under the lock
		bh = (*buckhashArray)(buckhash.Load())
		if bh == nil {
			bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
			if bh == nil {
				throw("runtime: cannot allocate memory")
			}
			buckhash.StoreNoWB(unsafe.Pointer(bh))
		}
		unlock(&profInsertLock)
	}

	// Hash stack.
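	// (The mixing below resembles the Jenkins "one-at-a-time" hash: each PC
	// is added and stirred with shifts and xors, the size is mixed in the
	// same way, and a final avalanche step spreads the remaining bits.)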
	var h uintptr
	for _, pc := range stk {
		h += pc
		h += h << 10
		h ^= h >> 6
	}
	// hash in size
	h += size
	h += h << 10
	h ^= h >> 6
	// finalize
	h += h << 3
	h ^= h >> 11

	i := int(h % buckHashSize)
	// first check optimistically, without the lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			return b
		}
	}

	if !alloc {
		return nil
	}

	lock(&profInsertLock)
	// check again under the insertion lock
	for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
		if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
			unlock(&profInsertLock)
			return b
		}
	}

	// Create new bucket.
	b := newBucket(typ, len(stk))
	copy(b.stk(), stk)
	b.hash = h
	b.size = size

	var allnext *atomic.UnsafePointer
	if typ == memProfile {
		allnext = &mbuckets
	} else if typ == mutexProfile {
		allnext = &xbuckets
	} else {
		allnext = &bbuckets
	}

	b.next = (*bucket)(bh[i].Load())
	b.allnext = (*bucket)(allnext.Load())

	bh[i].StoreNoWB(unsafe.Pointer(b))
	allnext.StoreNoWB(unsafe.Pointer(b))

	unlock(&profInsertLock)
	return b
}

func eqslice(x, y []uintptr) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xi := range x {
		if xi != y[i] {
			return false
		}
	}
	return true
}

// mProf_NextCycle publishes the next heap profile cycle and creates a
// fresh heap profile cycle. This operation is fast and can be done
// during STW. The caller must call mProf_Flush before calling
// mProf_NextCycle again.
//
// This is called by mark termination during STW so allocations and
// frees after the world is started again count towards a new heap
// profiling cycle.
func mProf_NextCycle() {
	mProfCycle.increment()
}

// mProf_Flush flushes the events from the current heap profiling
// cycle into the active profile. After this it is safe to start a new
// heap profiling cycle with mProf_NextCycle.
//
// This is called by GC after mark termination starts the world. In
// contrast with mProf_NextCycle, this is somewhat expensive, but safe
// to do concurrently.
func mProf_Flush() {
	cycle, alreadyFlushed := mProfCycle.setFlushed()
	if alreadyFlushed {
		return
	}

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// mProf_FlushLocked flushes the events from the heap profiling cycle at index
// into the active profile. The caller must hold the lock for the active profile
// (profMemActiveLock) and for the profiling cycle at index
// (profMemFutureLock[index]).
func mProf_FlushLocked(index uint32) {
	assertLockHeld(&profMemActiveLock)
	assertLockHeld(&profMemFutureLock[index])
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()

		// Flush cycle C into the published profile and clear
		// it for reuse.
		mpc := &mp.future[index]
		mp.active.add(mpc)
		*mpc = memRecordCycle{}
	}
}

// mProf_PostSweep records that all sweep frees for this GC cycle have
// completed.
// This has the effect of publishing the heap profile snapshot as of
// the last mark termination without advancing the heap profile cycle.
func mProf_PostSweep() {
	// Flush cycle C+1 to the active profile so everything as of
	// the last mark termination becomes visible. *Don't* advance
	// the cycle, since we're still accumulating allocs in cycle
	// C+2, which have to become C+1 in the next mark termination
	// and so on.
	cycle := mProfCycle.read() + 1

	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	unlock(&profMemActiveLock)
}

// Called by malloc to record a profiled block.
func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
	nstk := callers(4, mp.profStack)
	index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))

	b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
	mr := b.mp()
	mpc := &mr.future[index]

	lock(&profMemFutureLock[index])
	mpc.allocs++
	mpc.alloc_bytes += size
	unlock(&profMemFutureLock[index])

	// Setprofilebucket locks a bunch of other mutexes, so we call it outside of
	// the profiler locks. This reduces potential contention and chances of
	// deadlocks. Since the object must be alive during the call to
	// mProf_Malloc, it's fine to do this non-atomically.
	systemstack(func() {
		setprofilebucket(p, b)
	})
}

// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
	index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))

	mp := b.mp()
	mpc := &mp.future[index]

	lock(&profMemFutureLock[index])
	mpc.frees++
	mpc.free_bytes += size
	unlock(&profMemFutureLock[index])
}

var blockprofilerate uint64 // in CPU ticks

// SetBlockProfileRate controls the fraction of goroutine blocking events
// that are reported in the blocking profile. The profiler aims to sample
// an average of one blocking event per rate nanoseconds spent blocked.
//
// To include every blocking event in the profile, pass rate = 1.
// To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int) {
	var r int64
	if rate <= 0 {
		r = 0 // disable profiling
	} else if rate == 1 {
		r = 1 // profile everything
	} else {
		// convert ns to cycles, use float64 to prevent overflow during multiplication
		r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
		if r == 0 {
			r = 1
		}
	}

	atomic.Store64(&blockprofilerate, uint64(r))
}

func blockevent(cycles int64, skip int) {
	if cycles <= 0 {
		cycles = 1
	}

	rate := int64(atomic.Load64(&blockprofilerate))
	if blocksampled(cycles, rate) {
		saveblockevent(cycles, rate, skip+1, blockProfile)
	}
}

// blocksampled returns true for all events where cycles >= rate. Shorter
// events have a cycles/rate random chance of returning true.
func blocksampled(cycles, rate int64) bool {
	if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) {
		return false
	}
	return true
}

// saveblockevent records a profile event of the type specified by which.
// cycles is the quantity associated with this event and rate is the sampling rate,
// used to adjust the cycles value in the manner determined by the profile type.
// skip is the number of frames to omit from the traceback associated with the event.
// The traceback will be recorded from the stack of the goroutine associated with the current m.
// skip should be positive if this event is recorded from the current stack
// (e.g. when this is not called from a system stack).
func saveblockevent(cycles, rate int64, skip int, which bucketType) {
	gp := getg()
	mp := acquirem() // we must not be preempted while accessing profstack
	nstk := 1
	if tracefpunwindoff() || gp.m.hasCgoOnStack() {
		mp.profStack[0] = logicalStackSentinel
		if gp.m.curg == nil || gp.m.curg == gp {
			nstk = callers(skip, mp.profStack[1:])
		} else {
			nstk = gcallers(gp.m.curg, skip, mp.profStack[1:])
		}
	} else {
		mp.profStack[0] = uintptr(skip)
		if gp.m.curg == nil || gp.m.curg == gp {
			if skip > 0 {
				// We skip one fewer frame than the provided value for frame
				// pointer unwinding because the skip value includes the current
				// frame, whereas the saved frame pointer will give us the
				// caller's return address first (so, not including
				// saveblockevent)
				mp.profStack[0] -= 1
			}
			nstk += fpTracebackPCs(unsafe.Pointer(getfp()), mp.profStack[1:])
		} else {
			mp.profStack[1] = gp.m.curg.sched.pc
			nstk += 1 + fpTracebackPCs(unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[2:])
		}
	}

	saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
	releasem(mp)
}

// lockTimer assists with profiling contention on runtime-internal locks.
//
// There are several steps between the time that an M experiences contention and
// when that contention may be added to the profile. This comes from our
// constraints: We need to keep the critical section of each lock small,
// especially when those locks are contended. The reporting code cannot acquire
// new locks until the M has released all other locks, which means no memory
// allocations and encourages use of (temporary) M-local storage.
//
// The M will have space for storing one call stack that caused contention, and
// for the magnitude of that contention. It will also have space to store the
// magnitude of additional contention the M caused, since it only has space to
// remember one call stack and might encounter several contention events before
// it releases all of its locks and is thus able to transfer the local buffer
// into the profile.
//
// The M will collect the call stack when it unlocks the contended lock. That
// minimizes the impact on the critical section of the contended lock, and
// matches the mutex profile's behavior for contention in sync.Mutex: measured
// at the Unlock method.
//
// The profile for contention on sync.Mutex blames the caller of Unlock for the
// amount of contention experienced by the callers of Lock which had to wait.
// When there are several critical sections, this allows identifying which of
// them is responsible.
//
// Matching that behavior for runtime-internal locks will require identifying
// which Ms are blocked on the mutex. The semaphore-based implementation is
// ready to allow that, but the futex-based implementation will require a bit
// more work.
// Until then, we report contention on runtime-internal locks with a
// call stack taken from the unlock call (like the rest of the user-space
// "mutex" profile), but assign it a duration value based on how long the
// previous lock call took (like the user-space "block" profile).
//
// Thus, reporting the call stacks of runtime-internal lock contention is
// guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable.
//
// TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment
//
// The M will track this by storing a pointer to the lock; lock/unlock pairs for
// runtime-internal locks are always on the same M.
//
// Together, that demands several steps for recording contention. First, when
// finally acquiring a contended lock, the M decides whether it should plan to
// profile that event by storing a pointer to the lock in its "to be profiled
// upon unlock" field. If that field is already set, it uses the relative
// magnitudes to weight a random choice between itself and the other lock, with
// the loser's time being added to the "additional contention" field. Otherwise
// if the M's call stack buffer is occupied, it does the comparison against that
// sample's magnitude.
//
// Second, having unlocked a mutex the M checks to see if it should capture the
// call stack into its local buffer. Finally, when the M unlocks its last mutex,
// it transfers the local buffer into the profile. As part of that step, it also
// transfers any "additional contention" time to the profile. Any lock
// contention that it experiences while adding samples to the profile will be
// recorded later as "additional contention" and not include a call stack, to
// avoid an echo.
type lockTimer struct {
	lock      *mutex
	timeRate  int64
	timeStart int64
	tickStart int64
}

func (lt *lockTimer) begin() {
	rate := int64(atomic.Load64(&mutexprofilerate))

	lt.timeRate = gTrackingPeriod
	if rate != 0 && rate < lt.timeRate {
		lt.timeRate = rate
	}
	if int64(cheaprand())%lt.timeRate == 0 {
		lt.timeStart = nanotime()
	}

	if rate > 0 && int64(cheaprand())%rate == 0 {
		lt.tickStart = cputicks()
	}
}

func (lt *lockTimer) end() {
	gp := getg()

	if lt.timeStart != 0 {
		nowTime := nanotime()
		gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate)
	}

	if lt.tickStart != 0 {
		nowTick := cputicks()
		gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock)
	}
}

type mLockProfile struct {
	waitTime   atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank
	stack      []uintptr    // stack that experienced contention in runtime.lockWithRank
	pending    uintptr      // *mutex that experienced contention (to be traceback-ed)
	cycles     int64        // cycles attributable to "pending" (if set), otherwise to "stack"
	cyclesLost int64        // contention for which we weren't able to record a call stack
	disabled   bool         // attribute all time to "lost"
}

func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
	if cycles <= 0 {
		return
	}

	if prof.disabled {
		// We're experiencing contention while attempting to report contention.
		// Make a note of its magnitude, but don't allow it to be the sole cause
		// of another contention report.
		prof.cyclesLost += cycles
		return
	}

	if uintptr(unsafe.Pointer(l)) == prof.pending {
		// Optimization: we'd already planned to profile this same lock (though
		// possibly from a different unlock site).
		prof.cycles += cycles
		return
	}

	if prev := prof.cycles; prev > 0 {
		// We can only store one call stack for runtime-internal lock contention
		// on this M, and we've already got one. Decide which should stay, and
		// add the other to the report for runtime._LostContendedRuntimeLock.
		prevScore := uint64(cheaprand64()) % uint64(prev)
		thisScore := uint64(cheaprand64()) % uint64(cycles)
		if prevScore > thisScore {
			prof.cyclesLost += cycles
			return
		} else {
			prof.cyclesLost += prev
		}
	}
	// Saving the *mutex as a uintptr is safe because:
	// - lockrank_on.go does this too, which gives it regular exercise
	// - the lock would only move if it's stack allocated, which means it
	//   cannot experience multi-M contention
	prof.pending = uintptr(unsafe.Pointer(l))
	prof.cycles = cycles
}

// From unlock2, we might not be holding a p in this code.
//
//go:nowritebarrierrec
func (prof *mLockProfile) recordUnlock(l *mutex) {
	if uintptr(unsafe.Pointer(l)) == prof.pending {
		prof.captureStack()
	}
	if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.cycles != 0 {
		prof.store()
	}
}

func (prof *mLockProfile) captureStack() {
	skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
	if staticLockRanking {
		// When static lock ranking is enabled, we'll always be on the system
		// stack at this point. There will be a runtime.unlockWithRank.func1
		// frame, and if the call to runtime.unlock took place on a user stack
		// then there'll also be a runtime.systemstack frame. To keep stack
		// traces somewhat consistent whether or not static lock ranking is
		// enabled, we'd like to skip those. But it's hard to tell how long
		// we've been on the system stack so accept an extra frame in that case,
		// with a leaf of "runtime.unlockWithRank runtime.unlock" instead of
		// "runtime.unlock".
		skip += 1 // runtime.unlockWithRank.func1
	}
	prof.pending = 0

	prof.stack[0] = logicalStackSentinel
	if debug.runtimeContentionStacks.Load() == 0 {
		prof.stack[1] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
		prof.stack[2] = 0
		return
	}

	var nstk int
	gp := getg()
	sp := getcallersp()
	pc := getcallerpc()
	systemstack(func() {
		var u unwinder
		u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
		nstk = 1 + tracebackPCs(&u, skip, prof.stack[1:])
	})
	if nstk < len(prof.stack) {
		prof.stack[nstk] = 0
	}
}

func (prof *mLockProfile) store() {
	// Report any contention we experience within this function as "lost"; it's
	// important that the act of reporting a contention event not lead to a
	// reportable contention event. This also means we can use prof.stack
	// without copying, since it won't change during this function.
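	// (Note: acquirem below keeps us bound to this M and non-preemptible
	// while its mLockProfile buffer is read, reported, and reset.)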
	mp := acquirem()
	prof.disabled = true

	nstk := maxStack
	for i := 0; i < nstk; i++ {
		if pc := prof.stack[i]; pc == 0 {
			nstk = i
			break
		}
	}

	cycles, lost := prof.cycles, prof.cyclesLost
	prof.cycles, prof.cyclesLost = 0, 0

	rate := int64(atomic.Load64(&mutexprofilerate))
	saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
	if lost > 0 {
		lostStk := [...]uintptr{
			logicalStackSentinel,
			abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
		}
		saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
	}

	prof.disabled = false
	releasem(mp)
}

func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
	b := stkbucket(which, 0, stk, true)
	bp := b.bp()

	lock(&profBlockLock)
	// We want to up-scale the count and cycles according to the
	// probability that the event was sampled. For block profile events,
	// the sample probability is 1 if cycles >= rate, and cycles / rate
	// otherwise. For mutex profile events, the sample probability is 1 / rate.
	// We scale the events by 1 / (probability the event was sampled).
	if which == blockProfile && cycles < rate {
		// Remove sampling bias, see discussion on http://golang.org/cl/299991.
		bp.count += float64(rate) / float64(cycles)
		bp.cycles += rate
	} else if which == mutexProfile {
		bp.count += float64(rate)
		bp.cycles += rate * cycles
	} else {
		bp.count++
		bp.cycles += cycles
	}
	unlock(&profBlockLock)
}

var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
// (For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int {
	if rate < 0 {
		return int(mutexprofilerate)
	}
	old := mutexprofilerate
	atomic.Store64(&mutexprofilerate, uint64(rate))
	return int(old)
}

//go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int) {
	if cycles < 0 {
		cycles = 0
	}
	rate := int64(atomic.Load64(&mutexprofilerate))
	if rate > 0 && cheaprand64()%rate == 0 {
		saveblockevent(cycles, rate, skip+1, mutexProfile)
	}
}

// Go interface to profile data.

// A StackRecord describes a single execution stack.
type StackRecord struct {
	Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfileRate controls the fraction of memory allocations
// that are recorded and reported in the memory profile.
// The profiler aims to sample an average of
// one allocation per MemProfileRate bytes allocated.
//
// To include every allocated block in the profile, set MemProfileRate to 1.
// To turn off profiling entirely, set MemProfileRate to 0.
//
// The tools that process the memory profiles assume that the
// profile rate is constant across the lifetime of the program
// and equal to the current value. Programs that change the
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
var MemProfileRate int = 512 * 1024

// disableMemoryProfiling is set by the linker if runtime.MemProfile
// is not used and the link type guarantees nobody else could use it
// elsewhere.
var disableMemoryProfiling bool

// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
	AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
	AllocObjects, FreeObjects int64       // number of objects allocated, freed
	Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}

// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
	return r.AllocObjects - r.FreeObjects
}

// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
	for i, v := range r.Stack0 {
		if v == 0 {
			return r.Stack0[0:i]
		}
	}
	return r.Stack0[0:]
}

// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
// MemProfile returns n, the number of records in the current memory profile.
// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
// If len(p) < n, MemProfile does not change p and returns n, false.
//
// If inuseZero is true, the profile includes allocation records
// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
// These are sites where memory was allocated, but it has all
// been released back to the runtime.
//
// The returned profile may be up to two garbage collection cycles old.
// This is to avoid skewing the profile toward allocations; because
// allocations happen in real time but frees are delayed until the garbage
// collector performs sweeping, the profile only accounts for allocations
// that have had a chance to be freed by the garbage collector.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.memprofile flag instead
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
	cycle := mProfCycle.read()
	// If we're between mProf_NextCycle and mProf_Flush, take care
	// of flushing to the active profile so we only have to look
	// at the active profile below.
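	// (Flushing here is harmless even if mProf_Flush later handles the same
	// cycle: mProf_FlushLocked zeroes each future slot after accumulating it,
	// so a second flush of that slot adds nothing.)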
	index := cycle % uint32(len(memRecord{}.future))
	lock(&profMemActiveLock)
	lock(&profMemFutureLock[index])
	mProf_FlushLocked(index)
	unlock(&profMemFutureLock[index])
	clear := true
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
			n++
		}
		if mp.active.allocs != 0 || mp.active.frees != 0 {
			clear = false
		}
	}
	if clear {
		// Absolutely no data, suggesting that a garbage collection
		// has not yet happened. In order to allow profiling when
		// garbage collection is disabled from the beginning of execution,
		// accumulate all of the cycles, and recount buckets.
		n = 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			for c := range mp.future {
				lock(&profMemFutureLock[c])
				mp.active.add(&mp.future[c])
				mp.future[c] = memRecordCycle{}
				unlock(&profMemFutureLock[c])
			}
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				n++
			}
		}
	}
	if n <= len(p) {
		ok = true
		idx := 0
		for b := head; b != nil; b = b.allnext {
			mp := b.mp()
			if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
				record(&p[idx], b)
				idx++
			}
		}
	}
	unlock(&profMemActiveLock)
	return
}

// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
	mp := b.mp()
	r.AllocBytes = int64(mp.active.alloc_bytes)
	r.FreeBytes = int64(mp.active.free_bytes)
	r.AllocObjects = int64(mp.active.allocs)
	r.FreeObjects = int64(mp.active.frees)
	if raceenabled {
		racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	if asanenabled {
		asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
	}
	i := copy(r.Stack0[:], b.stk())
	clear(r.Stack0[i:])
}

func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
	lock(&profMemActiveLock)
	head := (*bucket)(mbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		mp := b.mp()
		fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
	}
	unlock(&profMemActiveLock)
}

// BlockProfileRecord describes blocking events originating
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
	Count  int64
	Cycles int64
	StackRecord
}

// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the [runtime/pprof] package or
// the [testing] package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(bbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			// Prevent callers from having to worry about division by zero errors.
			// See discussion on http://golang.org/cl/299991.
			if r.Count == 0 {
				r.Count = 1
			}
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			if asanenabled {
				asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := fpunwindExpand(r.Stack0[:], b.stk())
			clear(r.Stack0[i:])
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
//
// Most clients should use the [runtime/pprof] package
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&profBlockLock)
	head := (*bucket)(xbuckets.Load())
	for b := head; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := head; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = int64(bp.count)
			r.Cycles = bp.cycles
			i := fpunwindExpand(r.Stack0[:], b.stk())
			clear(r.Stack0[i:])
			p = p[1:]
		}
	}
	unlock(&profBlockLock)
	return
}

// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}

//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	return goroutineProfileWithLabels(p, labels)
}

// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if labels != nil && len(labels) != len(p) {
		labels = nil
	}

	return goroutineProfileWithLabelsConcurrent(p, labels)
}

var goroutineProfile = struct {
	sema    uint32
	active  bool
	offset  atomic.Int64
	records []StackRecord
	labels  []unsafe.Pointer
}{
	sema: 1,
}

// goroutineProfileState indicates the status of a goroutine's stack for the
// current in-progress goroutine profile. Goroutines' stacks are initially
// "Absent" from the profile, and end up "Satisfied" by the time the profile is
// complete. While a goroutine's stack is being captured, its
// goroutineProfileState will be "InProgress" and it will not be able to run
// until the capture completes and the state moves to "Satisfied".
//
// Some goroutines (the finalizer goroutine, which at various times can be
// either a "system" or a "user" goroutine; the goroutine that is
// coordinating the profile; and any goroutines created during the profile)
// move directly to the "Satisfied" state.
type goroutineProfileState uint32

const (
	goroutineProfileAbsent goroutineProfileState = iota
	goroutineProfileInProgress
	goroutineProfileSatisfied
)

type goroutineProfileStateHolder atomic.Uint32

func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
	return goroutineProfileState((*atomic.Uint32)(p).Load())
}

func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
	(*atomic.Uint32)(p).Store(uint32(value))
}

func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
	return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
}

func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	if len(p) == 0 {
		// An empty slice is obviously too small. Return a rough
		// allocation estimate without bothering to STW. As long as
		// this is close, we'll only need to STW once (on the next
		// call).
		return int(gcount()), false
	}

	semacquire(&goroutineProfile.sema)

	ourg := getg()

	stw := stopTheWorld(stwGoroutineProfile)
	// Using gcount while the world is stopped should give us a consistent view
	// of the number of live goroutines, minus the number of goroutines that are
	// alive and permanently marked as "system". But to make this count agree
	// with what we'd get from isSystemGoroutine, we need special handling for
	// goroutines that can vary between user and system to ensure that the count
	// doesn't change during the collection. So, check the finalizer goroutine
	// in particular.
	n = int(gcount())
	if fingStatus.Load()&fingRunningFinalizer != 0 {
		n++
	}

	if n > len(p) {
		// There's not enough space in p to store the whole profile, so (per the
		// contract of runtime.GoroutineProfile) we're not allowed to write to p
		// at all and must return n, false.
		startTheWorld(stw)
		semrelease(&goroutineProfile.sema)
		return n, false
	}

	// Save current goroutine.
	sp := getcallersp()
	pc := getcallerpc()
	systemstack(func() {
		saveg(pc, sp, ourg, &p[0])
	})
	if labels != nil {
		labels[0] = ourg.labels
	}
	ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
	goroutineProfile.offset.Store(1)

	// Prepare for all other goroutines to enter the profile. Aside from ourg,
	// every goroutine struct in the allgs list has its goroutineProfiled field
	// cleared. Any goroutine created from this point on (while
	// goroutineProfile.active is set) will start with its goroutineProfiled
	// field set to goroutineProfileSatisfied.
	goroutineProfile.active = true
	goroutineProfile.records = p
	goroutineProfile.labels = labels
	// The finalizer goroutine needs special handling because it can vary over
	// time between being a user goroutine (eligible for this profile) and a
	// system goroutine (to be excluded). Pick one before restarting the world.
	if fing != nil {
		fing.goroutineProfiled.Store(goroutineProfileSatisfied)
		if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
			doRecordGoroutineProfile(fing)
		}
	}
	startTheWorld(stw)

	// Visit each goroutine that existed as of the startTheWorld call above.
	//
	// New goroutines may not be in this list, but we didn't want to know about
	// them anyway. If they do appear in this list (via reusing a dead goroutine
	// struct, or racing to launch between the world restarting and us getting
	// the list), they will already have their goroutineProfiled field set to
	// goroutineProfileSatisfied before their state transitions out of _Gdead.
	//
	// Any goroutine that the scheduler tries to execute concurrently with this
	// call will start by adding itself to the profile (before the act of
	// executing can cause any changes in its stack).
	forEachGRace(func(gp1 *g) {
		tryRecordGoroutineProfile(gp1, Gosched)
	})

	stw = stopTheWorld(stwGoroutineProfileCleanup)
	endOffset := goroutineProfile.offset.Swap(0)
	goroutineProfile.active = false
	goroutineProfile.records = nil
	goroutineProfile.labels = nil
	startTheWorld(stw)

	// Restore the invariant that every goroutine struct in allgs has its
	// goroutineProfiled field cleared.
	forEachGRace(func(gp1 *g) {
		gp1.goroutineProfiled.Store(goroutineProfileAbsent)
	})

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	if n != int(endOffset) {
		// It's a big surprise that the number of goroutines changed while we
		// were collecting the profile. But probably better to return a
		// truncated profile than to crash the whole process.
		//
		// For instance, needm moves a goroutine out of the _Gdead state and so
		// might be able to change the goroutine count without interacting with
		// the scheduler. For code like that, the race windows are small and the
		// combination of features is uncommon, so it's hard to be (and remain)
		// sure we've caught them all.
	}

	semrelease(&goroutineProfile.sema)
	return n, true
}

// tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
// tryRecordGoroutineProfile.
//
//go:yeswritebarrierrec
func tryRecordGoroutineProfileWB(gp1 *g) {
	if getg().m.p.ptr() == nil {
		throw("no P available, write barriers are forbidden")
	}
	tryRecordGoroutineProfile(gp1, osyield)
}

// tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
// in the current goroutine profile: either that it should not be profiled, or
// that a snapshot of its call stack and labels are now in the profile.
func tryRecordGoroutineProfile(gp1 *g, yield func()) {
	if readgstatus(gp1) == _Gdead {
		// Dead goroutines should not appear in the profile. Goroutines that
		// start while profile collection is active will get goroutineProfiled
		// set to goroutineProfileSatisfied before transitioning out of _Gdead,
		// so here we check _Gdead first.
		return
	}
	if isSystemGoroutine(gp1, true) {
		// System goroutines should not appear in the profile. (The finalizer
		// goroutine is marked as "already profiled".)
		return
	}

	for {
		prev := gp1.goroutineProfiled.Load()
		if prev == goroutineProfileSatisfied {
			// This goroutine is already in the profile (or is new since the
			// start of collection, so shouldn't appear in the profile).
			break
		}
		if prev == goroutineProfileInProgress {
			// Something else is adding gp1 to the goroutine profile right now.
			// Give that a moment to finish.
			yield()
			continue
		}

		// While we have gp1.goroutineProfiled set to
		// goroutineProfileInProgress, gp1 may appear _Grunnable but will not
		// actually be able to run. Disable preemption for ourselves, to make
		// sure we finish profiling gp1 right away instead of leaving it stuck
		// in this limbo.
		mp := acquirem()
		if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
			doRecordGoroutineProfile(gp1)
			gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
		releasem(mp)
	}
}

// doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
// goroutine profile. Preemption is disabled.
//
// This may be called via tryRecordGoroutineProfile in two ways: by the
// goroutine that is coordinating the goroutine profile (running on its own
// stack), or from the scheduler in preparation to execute gp1 (running on the
// system stack).
func doRecordGoroutineProfile(gp1 *g) {
	if readgstatus(gp1) == _Grunning {
		print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
		throw("cannot read stack of running goroutine")
	}

	offset := int(goroutineProfile.offset.Add(1)) - 1

	if offset >= len(goroutineProfile.records) {
		// Should be impossible, but better to return a truncated profile than
		// to crash the entire process at this point. Instead, deal with it in
		// goroutineProfileWithLabelsConcurrent where we have more context.
		return
	}

	// saveg calls gentraceback, which may call cgo traceback functions. When
	// called from the scheduler, this is on the system stack already so
	// traceback.go:cgoContextPCs will avoid calling back into the scheduler.
	//
	// When called from the goroutine coordinating the profile, we still have
	// set gp1.goroutineProfiled to goroutineProfileInProgress and so are still
	// preventing it from being truly _Grunnable. So we'll use the system stack
	// to avoid schedule delays.
	systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset]) })

	if goroutineProfile.labels != nil {
		goroutineProfile.labels[offset] = gp1.labels
	}
}

func goroutineProfileWithLabelsSync(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
	gp := getg()

	isOK := func(gp1 *g) bool {
		// Checking isSystemGoroutine here makes GoroutineProfile
		// consistent with both NumGoroutine and Stack.
		return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
	}

	stw := stopTheWorld(stwGoroutineProfile)

	// World is stopped, no locking required.
	n = 1
	forEachGRace(func(gp1 *g) {
		if isOK(gp1) {
			n++
		}
	})

	if n <= len(p) {
		ok = true
		r, lbl := p, labels

		// Save current goroutine.
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			saveg(pc, sp, gp, &r[0])
		})
		r = r[1:]

		// If we have a place to put our goroutine labelmap, insert it there.
		if labels != nil {
			lbl[0] = gp.labels
			lbl = lbl[1:]
		}

		// Save other goroutines.
		forEachGRace(func(gp1 *g) {
			if !isOK(gp1) {
				return
			}

			if len(r) == 0 {
				// Should be impossible, but better to return a
				// truncated profile than to crash the entire process.
				return
			}
			// saveg calls gentraceback, which may call cgo traceback functions.
			// The world is stopped, so it cannot use cgocall (which will be
			// blocked at exitsyscall). Do it on the system stack so it won't
			// call into the scheduler (see traceback.go:cgoContextPCs).
			systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) })
			if labels != nil {
				lbl[0] = gp1.labels
				lbl = lbl[1:]
			}
			r = r[1:]
		})
	}

	if raceenabled {
		raceacquire(unsafe.Pointer(&labelSync))
	}

	startTheWorld(stw)
	return n, ok
}

// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
//
// Most clients should use the [runtime/pprof] package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {

	return goroutineProfileWithLabels(p, nil)
}

func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
	var u unwinder
	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
	n := tracebackPCs(&u, 0, r.Stack0[:])
	if n < len(r.Stack0) {
		r.Stack0[n] = 0
	}
}

// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
	var stw worldStop
	if all {
		stw = stopTheWorld(stwAllGoroutinesStack)
	}

	n := 0
	if len(buf) > 0 {
		gp := getg()
		sp := getcallersp()
		pc := getcallerpc()
		systemstack(func() {
			g0 := getg()
			// Force traceback=1 to override GOTRACEBACK setting,
			// so that Stack's results are consistent.
			// GOTRACEBACK is only about crash dumps.
			g0.m.traceback = 1
			g0.writebuf = buf[0:0:len(buf)]
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
			if all {
				tracebackothers(gp)
			}
			g0.m.traceback = 0
			n = len(g0.writebuf)
			g0.writebuf = nil
		})
	}

	if all {
		startTheWorld(stw)
	}
	return n
}