Source file src/runtime/traceruntime.go

     1  // Copyright 2023 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Runtime -> tracer API.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/runtime/atomic"
    11  	_ "unsafe" // for go:linkname
    12  )
    13  
    14  // gTraceState is per-G state for the tracer.
    15  type gTraceState struct {
    16  	traceSchedResourceState
    17  }
    18  
    19  // reset resets the gTraceState for a new goroutine.
    20  func (s *gTraceState) reset() {
    21  	s.seq = [2]uint64{}
    22  	// N.B. s.statusTraced is managed and cleared separately.
    23  }
    24  
    25  // mTraceState is per-M state for the tracer.
    26  type mTraceState struct {
    27  	seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer.
    28  	buf     [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
    29  	link    *m             // Snapshot of alllink or freelink.
    30  }
    31  
    32  // pTraceState is per-P state for the tracer.
    33  type pTraceState struct {
    34  	traceSchedResourceState
    35  
    36  	// mSyscallID is the ID of the M this was bound to before entering a syscall.
    37  	mSyscallID int64
    38  
    39  	// maySweep indicates that sweep events should be traced.
    40  	// This is used to defer the sweep start event until a span
    41  	// has actually been swept.
    42  	maySweep bool
    43  
    44  	// inSweep indicates that at least one sweep event has been traced.
    45  	inSweep bool
    46  
    47  	// swept and reclaimed track the number of bytes swept and reclaimed
    48  	// by sweeping in the current sweep loop (while maySweep was true).
    49  	swept, reclaimed uintptr
    50  }
    51  
    52  // traceLockInit initializes global trace locks.
    53  func traceLockInit() {
    54  	// Sharing a lock rank here is fine because they should never be accessed
    55  	// together. If they are, we want to find out immediately.
    56  	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
    57  	lockInit(&trace.stringTab[0].tab.mem.lock, lockRankTraceStrings)
    58  	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
    59  	lockInit(&trace.stringTab[1].tab.mem.lock, lockRankTraceStrings)
    60  	lockInit(&trace.stackTab[0].tab.mem.lock, lockRankTraceStackTab)
    61  	lockInit(&trace.stackTab[1].tab.mem.lock, lockRankTraceStackTab)
    62  	lockInit(&trace.lock, lockRankTrace)
    63  }
    64  
    65  // lockRankMayTraceFlush records the lock ranking effects of a
    66  // potential call to traceFlush.
    67  //
    68  // nosplit because traceAcquire is nosplit.
    69  //
    70  //go:nosplit
    71  func lockRankMayTraceFlush() {
    72  	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
    73  }
    74  
    75  // traceBlockReason is an enumeration of reasons a goroutine might block.
    76  // This is the interface the rest of the runtime uses to tell the
    77  // tracer why a goroutine blocked. The tracer then propagates this information
    78  // into the trace however it sees fit.
    79  //
    80  // Note that traceBlockReasons should not be compared, since reasons that are
    81  // distinct by name may *not* be distinct by value.
    82  type traceBlockReason uint8
    83  
    84  const (
    85  	traceBlockGeneric traceBlockReason = iota
    86  	traceBlockForever
    87  	traceBlockNet
    88  	traceBlockSelect
    89  	traceBlockCondWait
    90  	traceBlockSync
    91  	traceBlockChanSend
    92  	traceBlockChanRecv
    93  	traceBlockGCMarkAssist
    94  	traceBlockGCSweep
    95  	traceBlockSystemGoroutine
    96  	traceBlockPreempted
    97  	traceBlockDebugCall
    98  	traceBlockUntilGCEnds
    99  	traceBlockSleep
   100  )
   101  
   102  var traceBlockReasonStrings = [...]string{
   103  	traceBlockGeneric:         "unspecified",
   104  	traceBlockForever:         "forever",
   105  	traceBlockNet:             "network",
   106  	traceBlockSelect:          "select",
   107  	traceBlockCondWait:        "sync.(*Cond).Wait",
   108  	traceBlockSync:            "sync",
   109  	traceBlockChanSend:        "chan send",
   110  	traceBlockChanRecv:        "chan receive",
   111  	traceBlockGCMarkAssist:    "GC mark assist wait for work",
   112  	traceBlockGCSweep:         "GC background sweeper wait",
   113  	traceBlockSystemGoroutine: "system goroutine wait",
   114  	traceBlockPreempted:       "preempted",
   115  	traceBlockDebugCall:       "wait for debug call",
   116  	traceBlockUntilGCEnds:     "wait until GC ends",
   117  	traceBlockSleep:           "sleep",
   118  }
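        // For illustration, a hedged sketch of how a block reason typically reaches the
        // trace: a call site passes it to GoPark (defined below), which looks the reason
        // up in trace.goBlockReasons for the current generation; that table is populated
        // elsewhere in the tracer from the strings above. The call site shown here is
        // only an example.
        //
        //	tl.GoPark(traceBlockChanRecv, 2)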
   119  
   120  // traceGoStopReason is an enumeration of reasons a goroutine might yield.
   121  //
   122  // Note that traceGoStopReasons should not be compared, since reasons that are
   123  // distinct by name may *not* be distinct by value.
   124  type traceGoStopReason uint8
   125  
   126  const (
   127  	traceGoStopGeneric traceGoStopReason = iota
   128  	traceGoStopGoSched
   129  	traceGoStopPreempted
   130  )
   131  
   132  var traceGoStopReasonStrings = [...]string{
   133  	traceGoStopGeneric:   "unspecified",
   134  	traceGoStopGoSched:   "runtime.Gosched",
   135  	traceGoStopPreempted: "preempted",
   136  }
   137  
   138  // traceEnabled returns true if the trace is currently enabled.
   139  //
   140  //go:nosplit
   141  func traceEnabled() bool {
   142  	return trace.enabled
   143  }
   144  
   145  // traceShuttingDown returns true if the trace is currently shutting down.
   146  func traceShuttingDown() bool {
   147  	return trace.shutdown.Load()
   148  }
   149  
   150  // traceLocker represents an M writing trace events. While a traceLocker value
   151  // is valid, the tracer observes all operations on the G/M/P or trace events being
   152  // written as happening atomically.
   153  type traceLocker struct {
   154  	mp  *m
   155  	gen uintptr
   156  }
   157  
   158  // debugTraceReentrancy enables checks for reentrant use of the tracer.
   159  //
   160  // This is optional because throwing in a function makes it instantly
   161  // not inlineable, and we want traceAcquire to be inlineable for
   162  // low overhead when the trace is disabled.
   163  const debugTraceReentrancy = false
   164  
   165  // traceAcquire prepares this M for writing one or more trace events.
   166  //
   167  // nosplit because it's called on the syscall path when stack movement is forbidden.
   168  //
   169  //go:nosplit
   170  func traceAcquire() traceLocker {
   171  	if !traceEnabled() {
   172  		return traceLocker{}
   173  	}
   174  	return traceAcquireEnabled()
   175  }
   176  
   177  // traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
   178  // broken out to make traceAcquire inlineable to keep the overhead of the tracer
   179  // when it's disabled low.
   180  //
   181  // nosplit because it's called by traceAcquire, which is nosplit.
   182  //
   183  //go:nosplit
   184  func traceAcquireEnabled() traceLocker {
   185  	// Any time we acquire a traceLocker, we may flush a trace buffer. But
   186  	// buffer flushes are rare. Record the lock edge even if it doesn't happen
   187  	// this time.
   188  	lockRankMayTraceFlush()
   189  
   190  	// Prevent preemption.
   191  	mp := acquirem()
   192  
   193  	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
   194  	// until all Ms are observed to be outside of their seqlock critical section.
   195  	//
   196  	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
   197  	// usage of the seqlock here, make sure to also look at what traceCPUSample is
   198  	// doing.
   199  	seq := mp.trace.seqlock.Add(1)
   200  	if debugTraceReentrancy && seq%2 != 1 {
   201  		throw("bad use of trace.seqlock or tracer is reentrant")
   202  	}
   203  
   204  	// N.B. This load of gen appears redundant with the check in traceEnabled.
   205  	// However, it's very important that the gen we use for writing to the trace
   206  	// is acquired under a traceLocker so traceAdvance can make sure no stale
   207  	// gen values are being used.
   208  	//
   209  	// Because we're doing this load again, it also means that the trace
   210  	// might end up being disabled when we load it. In that case we need to undo
   211  	// what we did and bail.
   212  	gen := trace.gen.Load()
   213  	if gen == 0 {
   214  		mp.trace.seqlock.Add(1)
   215  		releasem(mp)
   216  		return traceLocker{}
   217  	}
   218  	return traceLocker{mp, gen}
   219  }
   220  
   221  // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
   222  //
   223  // nosplit because it's called on the syscall path when stack movement is forbidden.
   224  //
   225  //go:nosplit
   226  func (tl traceLocker) ok() bool {
   227  	return tl.gen != 0
   228  }
   229  
   230  // traceRelease indicates that this M is done writing trace events.
   231  //
   232  // nosplit because it's called on the syscall path when stack movement is forbidden.
   233  //
   234  //go:nosplit
   235  func traceRelease(tl traceLocker) {
   236  	seq := tl.mp.trace.seqlock.Add(1)
   237  	if debugTraceReentrancy && seq%2 != 0 {
   238  		print("runtime: seq=", seq, "\n")
   239  		throw("bad use of trace.seqlock")
   240  	}
   241  	releasem(tl.mp)
   242  }
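        // For illustration, a minimal sketch of the acquire/release pattern used by
        // event-emission sites throughout this file: when tracing is enabled,
        // traceAcquire enters the M's seqlock critical section (odd seqlock value),
        // ok reports whether a valid locker was obtained, and traceRelease exits the
        // critical section (even value). The event and arguments are only examples
        // borrowed from elsewhere in this file.
        //
        //	tl := traceAcquire()
        //	if tl.ok() {
        //		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
        //		traceRelease(tl)
        //	}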
   243  
   244  // traceExitingSyscall marks a goroutine as exiting the syscall slow path.
   245  //
   246  // Must be paired with a traceExitedSyscall call.
   247  func traceExitingSyscall() {
   248  	trace.exitingSyscall.Add(1)
   249  }
   250  
   251  // traceExitedSyscall marks a goroutine as having exited the syscall slow path.
   252  func traceExitedSyscall() {
   253  	trace.exitingSyscall.Add(-1)
   254  }
   255  
   256  // Gomaxprocs emits a ProcsChange event.
   257  func (tl traceLocker) Gomaxprocs(procs int32) {
   258  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
   259  }
   260  
   261  // ProcStart traces a ProcStart event.
   262  //
   263  // Must be called with a valid P.
   264  func (tl traceLocker) ProcStart() {
   265  	pp := tl.mp.p.ptr()
   266  	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
   267  	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
   268  	// is during a syscall.
   269  	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
   270  }
   271  
   272  // ProcStop traces a ProcStop event.
   273  func (tl traceLocker) ProcStop(pp *p) {
   274  	// The only time a goroutine is allowed to have its Proc moved around
   275  	// from under it is during a syscall.
   276  	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
   277  }
   278  
   279  // GCActive traces a GCActive event.
   280  //
   281  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   282  // easily and only depends on where it's currently called.
   283  func (tl traceLocker) GCActive() {
   284  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
   285  	// N.B. Only one GC can be running at a time, so this is naturally
   286  	// serialized by the caller.
   287  	trace.seqGC++
   288  }
   289  
   290  // GCStart traces a GCBegin event.
   291  //
   292  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   293  // easily and only depends on where it's currently called.
   294  func (tl traceLocker) GCStart() {
   295  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
   296  	// N.B. Only one GC can be running at a time, so this is naturally
   297  	// serialized by the caller.
   298  	trace.seqGC++
   299  }
   300  
   301  // GCDone traces a GCEnd event.
   302  //
   303  // Must be emitted by an actively running goroutine on an active P. This restriction can be changed
   304  // easily and only depends on where it's currently called.
   305  func (tl traceLocker) GCDone() {
   306  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
   307  	// N.B. Only one GC can be running at a time, so this is naturally
   308  	// serialized by the caller.
   309  	trace.seqGC++
   310  }
   311  
   312  // STWStart traces a STWBegin event.
   313  func (tl traceLocker) STWStart(reason stwReason) {
   314  	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
   315  	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
   316  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
   317  }
   318  
   319  // STWDone traces a STWEnd event.
   320  func (tl traceLocker) STWDone() {
   321  	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
   322  	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
   323  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
   324  }
   325  
   326  // GCSweepStart prepares to trace a sweep loop. This does not
   327  // emit any events until traceGCSweepSpan is called.
   328  //
   329  // GCSweepStart must be paired with traceGCSweepDone and there
   330  // must be no preemption points between these two calls.
   331  //
   332  // Must be called with a valid P.
   333  func (tl traceLocker) GCSweepStart() {
   334  	// Delay the actual GCSweepBegin event until the first span
   335  	// sweep. If we don't sweep anything, don't emit any events.
   336  	pp := tl.mp.p.ptr()
   337  	if pp.trace.maySweep {
   338  		throw("double traceGCSweepStart")
   339  	}
   340  	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
   341  }
   342  
   343  // GCSweepSpan traces the sweep of a single span. If this is
   344  // the first span swept since traceGCSweepStart was called, this
   345  // will emit a GCSweepBegin event.
   346  //
   347  // This may be called outside a traceGCSweepStart/traceGCSweepDone
   348  // pair; however, it will not emit any trace events in this case.
   349  //
   350  // Must be called with a valid P.
   351  func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
   352  	pp := tl.mp.p.ptr()
   353  	if pp.trace.maySweep {
   354  		if pp.trace.swept == 0 {
   355  			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
   356  			pp.trace.inSweep = true
   357  		}
   358  		pp.trace.swept += bytesSwept
   359  	}
   360  }
   361  
   362  // GCSweepDone finishes tracing a sweep loop. If any memory was
   363  // swept (i.e. traceGCSweepSpan emitted an event) then this will emit
   364  // a GCSweepEnd event.
   365  //
   366  // Must be called with a valid P.
   367  func (tl traceLocker) GCSweepDone() {
   368  	pp := tl.mp.p.ptr()
   369  	if !pp.trace.maySweep {
   370  		throw("missing traceGCSweepStart")
   371  	}
   372  	if pp.trace.inSweep {
   373  		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
   374  		pp.trace.inSweep = false
   375  	}
   376  	pp.trace.maySweep = false
   377  }
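        // For illustration, a hedged sketch of how the three sweep hooks above pair up
        // at a call site. The loop and bytesSwept are hypothetical; the real callers
        // live in the sweeper and must avoid preemption points between Start and Done.
        //
        //	tl.GCSweepStart()
        //	for /* each span swept */ {
        //		tl.GCSweepSpan(bytesSwept)
        //	}
        //	tl.GCSweepDone()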
   378  
   379  // GCMarkAssistStart emits a MarkAssistBegin event.
   380  func (tl traceLocker) GCMarkAssistStart() {
   381  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
   382  }
   383  
   384  // GCMarkAssistDone emits a MarkAssistEnd event.
   385  func (tl traceLocker) GCMarkAssistDone() {
   386  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
   387  }
   388  
   389  // GoCreate emits a GoCreate event.
   390  func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
   391  	newg.trace.setStatusTraced(tl.gen)
   392  	ev := traceEvGoCreate
   393  	if blocked {
   394  		ev = traceEvGoCreateBlocked
   395  	}
   396  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
   397  }
   398  
   399  // GoStart emits a GoStart event.
   400  //
   401  // Must be called with a valid P.
   402  func (tl traceLocker) GoStart() {
   403  	gp := getg().m.curg
   404  	pp := gp.m.p
   405  	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
   406  	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
   407  	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
   408  		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
   409  	}
   410  	w.end()
   411  }
   412  
   413  // GoEnd emits a GoDestroy event.
   414  //
   415  // TODO(mknyszek): Rename this to GoDestroy.
   416  func (tl traceLocker) GoEnd() {
   417  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
   418  }
   419  
   420  // GoSched emits a GoStop event with a GoSched reason.
   421  func (tl traceLocker) GoSched() {
   422  	tl.GoStop(traceGoStopGoSched)
   423  }
   424  
   425  // GoPreempt emits a GoStop event with a GoPreempted reason.
   426  func (tl traceLocker) GoPreempt() {
   427  	tl.GoStop(traceGoStopPreempted)
   428  }
   429  
   430  // GoStop emits a GoStop event with the provided reason.
   431  func (tl traceLocker) GoStop(reason traceGoStopReason) {
   432  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
   433  }
   434  
   435  // GoPark emits a GoBlock event with the provided reason.
   436  //
   437  // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
   438  // that we have both, and waitReason is way more descriptive.
   439  func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
   440  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
   441  }
   442  
   443  // GoUnpark emits a GoUnblock event.
   444  func (tl traceLocker) GoUnpark(gp *g, skip int) {
   445  	// Emit a GoWaiting status if necessary for the unblocked goroutine.
   446  	w := tl.eventWriter(traceGoRunning, traceProcRunning)
   447  	// Careful: don't use the event writer. We never want status or in-progress events
   448  	// to trigger more in-progress events.
   449  	w.w = emitUnblockStatus(w.w, gp, tl.gen)
   450  	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
   451  }
   452  
   453  // GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
   454  // is simultaneously being destroyed.
   455  func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
   456  	// Emit a GoWaiting status if necessary for the unblocked goroutine.
   457  	w := tl.eventWriter(traceGoRunning, traceProcRunning)
   458  	// Careful: don't use the event writer. We never want status or in-progress events
   459  	// to trigger more in-progress events.
   460  	w.w = emitUnblockStatus(w.w, nextg, tl.gen)
   461  	ev := traceEvGoSwitch
   462  	if destroy {
   463  		ev = traceEvGoSwitchDestroy
   464  	}
   465  	w.commit(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
   466  }
   467  
   468  // emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
   469  // unblocked to the trace writer.
   470  func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
   471  	if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
   472  		// TODO(go.dev/issue/65634): Although it would be nice to add a stack trace here of gp,
   473  		// we cannot safely do so. gp is in _Gwaiting and so we don't have ownership of its stack.
   474  		// We can fix this by acquiring the goroutine's scan bit.
   475  		w = w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist, 0)
   476  	}
   477  	return w
   478  }
   479  
   480  // GoSysCall emits a GoSyscallBegin event.
   481  //
   482  // Must be called with a valid P.
   483  func (tl traceLocker) GoSysCall() {
   484  	// Scribble down the M that the P is currently attached to.
   485  	pp := tl.mp.p.ptr()
   486  	pp.trace.mSyscallID = int64(tl.mp.procid)
   487  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
   488  }
   489  
   490  // GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
   491  // if lostP is true.
   492  //
   493  // lostP must be true in every case where the goroutine lost its P during the syscall.
   494  // This means it's not sufficient to check whether it currently has no P. In particular, it needs to be
   495  // true in the following cases:
   496  // - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
   497  // - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
   498  // - The goroutine lost its P and acquired a different one, and is now running with that P.
   499  func (tl traceLocker) GoSysExit(lostP bool) {
   500  	ev := traceEvGoSyscallEnd
   501  	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
   502  	if lostP {
   503  		ev = traceEvGoSyscallEndBlocked
   504  		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
   505  	} else {
   506  		tl.mp.p.ptr().trace.mSyscallID = -1
   507  	}
   508  	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
   509  }
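        // For illustration, a hedged sketch of how the two syscall events above pair
        // up. The surrounding control flow is hypothetical; the real call sites are the
        // scheduler's syscall entry and exit paths.
        //
        //	// Entering the syscall, while still holding a P:
        //	if tl := traceAcquire(); tl.ok() {
        //		tl.GoSysCall()
        //		traceRelease(tl)
        //	}
        //	// ... the syscall runs; the P may be retaken (see ProcSteal below) ...
        //	// Exiting the syscall:
        //	if tl := traceAcquire(); tl.ok() {
        //		tl.GoSysExit(lostP) // lostP is true if the P was lost at any point, even if reacquired.
        //		traceRelease(tl)
        //	}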
   510  
   511  // ProcSteal indicates that our current M stole a P from another M.
   512  //
   513  // inSyscall indicates that we're stealing the P from a syscall context.
   514  //
   515  // The caller must have ownership of pp.
   516  func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
   517  	// Grab the M ID we stole from.
   518  	mStolenFrom := pp.trace.mSyscallID
   519  	pp.trace.mSyscallID = -1
   520  
   521  	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
   522  	// context of just emitting this event alone. There are two cases. Either we're trying to steal
   523  	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
   524  	// ourselves specifically to keep running. The two contexts look different, but can be summarized
   525  	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
   526  	// In the latter, we're a goroutine in a syscall.
   527  	goStatus := traceGoRunning
   528  	procStatus := traceProcRunning
   529  	if inSyscall {
   530  		goStatus = traceGoSyscall
   531  		procStatus = traceProcSyscallAbandoned
   532  	}
   533  	w := tl.eventWriter(goStatus, procStatus)
   534  
   535  	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
   536  	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
   537  	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
   538  	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
   539  	// at all (e.g. entersyscall_gcwait).
   540  	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
   541  		// Careful: don't use the event writer. We never want status or in-progress events
   542  		// to trigger more in-progress events.
   543  		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
   544  	}
   545  	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
   546  }
   547  
   548  // HeapAlloc emits a HeapAlloc event.
   549  func (tl traceLocker) HeapAlloc(live uint64) {
   550  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
   551  }
   552  
   553  // HeapGoal reads the current heap goal and emits a HeapGoal event.
   554  func (tl traceLocker) HeapGoal() {
   555  	heapGoal := gcController.heapGoal()
   556  	if heapGoal == ^uint64(0) {
   557  		// Heap-based triggering is disabled.
   558  		heapGoal = 0
   559  	}
   560  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
   561  }
   562  
   563  // OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
   564  // it's a good place to insert a thread-level event about the new extra M.
   565  func (tl traceLocker) OneNewExtraM(_ *g) {
   566  }
   567  
   568  // GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
   569  //
   570  // Unlike GoCreate, the caller must be running on gp.
   571  //
   572  // This occurs when C code calls into Go. On pthread platforms it occurs only when
   573  // a C thread calls into Go code for the first time.
   574  func (tl traceLocker) GoCreateSyscall(gp *g) {
   575  	// N.B. We should never trace a status for this goroutine (which we're currently running on),
   576  	// since we want this to appear like goroutine creation.
   577  	gp.trace.setStatusTraced(tl.gen)
   578  	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
   579  }
   580  
   581  // GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
   582  //
   583  // Must not have a P.
   584  //
   585  // This occurs when Go code returns back to C. On pthread platforms it occurs only when
   586  // the C thread is destroyed.
   587  func (tl traceLocker) GoDestroySyscall() {
   588  	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
   589  	// that is in the syscall state.
   590  	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
   591  }
   592  
   593  // To access runtime functions from runtime/trace.
   594  // See runtime/trace/annotation.go
   595  
   596  // trace_userTaskCreate emits a UserTaskCreate event.
   597  //
   598  //go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
   599  func trace_userTaskCreate(id, parentID uint64, taskType string) {
   600  	tl := traceAcquire()
   601  	if !tl.ok() {
   602  		// Need to do this check because the caller won't have it.
   603  		return
   604  	}
   605  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
   606  	traceRelease(tl)
   607  }
   608  
   609  // trace_userTaskEnd emits a UserTaskEnd event.
   610  //
   611  //go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
   612  func trace_userTaskEnd(id uint64) {
   613  	tl := traceAcquire()
   614  	if !tl.ok() {
   615  		// Need to do this check because the caller won't have it.
   616  		return
   617  	}
   618  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
   619  	traceRelease(tl)
   620  }
   621  
   622  // trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
   623  // depending on mode (0 == Begin, 1 == End).
   624  //
   625  // TODO(mknyszek): Just make this two functions.
   626  //
   627  //go:linkname trace_userRegion runtime/trace.userRegion
   628  func trace_userRegion(id, mode uint64, name string) {
   629  	tl := traceAcquire()
   630  	if !tl.ok() {
   631  		// Need to do this check because the caller won't have it.
   632  		return
   633  	}
   634  	var ev traceEv
   635  	switch mode {
   636  	case 0:
   637  		ev = traceEvUserRegionBegin
   638  	case 1:
   639  		ev = traceEvUserRegionEnd
   640  	default:
   641  		return
   642  	}
   643  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
   644  	traceRelease(tl)
   645  }
   646  
   647  // trace_userLog emits a UserLog event.
   648  //
   649  //go:linkname trace_userLog runtime/trace.userLog
   650  func trace_userLog(id uint64, category, message string) {
   651  	tl := traceAcquire()
   652  	if !tl.ok() {
   653  		// Need to do this check because the caller won't have it.
   654  		return
   655  	}
   656  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
   657  	traceRelease(tl)
   658  }
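        // For illustration, the linknamed functions above back the public annotation
        // API in the runtime/trace package. A hedged user-level sketch follows, where
        // decode stands in for any func() the caller wants to trace as a region.
        //
        //	ctx, task := trace.NewTask(ctx, "request")
        //	defer task.End()
        //	trace.WithRegion(ctx, "decode", decode)
        //	trace.Log(ctx, "category", "message")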
   659  
   660  // traceProcFree is called when a P is destroyed.
   661  //
   662  // This must run on the system stack to match the old tracer.
   663  //
   664  //go:systemstack
   665  func traceProcFree(_ *p) {
   666  }
   667  
   668  // traceThreadDestroy is called when a thread is removed from
   669  // sched.freem.
   670  //
   671  // mp must not be able to emit trace events anymore.
   672  //
   673  // sched.lock must be held to synchronize with traceAdvance.
   674  func traceThreadDestroy(mp *m) {
   675  	assertLockHeld(&sched.lock)
   676  
   677  	// Flush all outstanding buffers to maintain the invariant
   678  	// that an M only has active buffers while on sched.freem
   679  	// or allm.
   680  	//
   681  	// Perform a traceAcquire/traceRelease on behalf of mp to
   682  	// synchronize with the tracer trying to flush our buffer
   683  	// as well.
   684  	seq := mp.trace.seqlock.Add(1)
   685  	if debugTraceReentrancy && seq%2 != 1 {
   686  		throw("bad use of trace.seqlock or tracer is reentrant")
   687  	}
   688  	systemstack(func() {
   689  		lock(&trace.lock)
   690  		for i := range mp.trace.buf {
   691  			if mp.trace.buf[i] != nil {
   692  				// N.B. traceBufFlush accepts a generation, but it
   693  				// really just cares about gen%2.
   694  				traceBufFlush(mp.trace.buf[i], uintptr(i))
   695  				mp.trace.buf[i] = nil
   696  			}
   697  		}
   698  		unlock(&trace.lock)
   699  	})
   700  	seq1 := mp.trace.seqlock.Add(1)
   701  	if seq1 != seq+1 {
   702  		print("runtime: seq1=", seq1, "\n")
   703  		throw("bad use of trace.seqlock")
   704  	}
   705  }
   706  
   707  // Not used in the new tracer; solely for compatibility with the old tracer.
   708  // nosplit because it's called from exitsyscall without a P.
   709  //
   710  //go:nosplit
   711  func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
   712  }
   713  
