Source file src/runtime/time.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Time-related runtime code and the runtime parts of package time.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/runtime/atomic"
    12  	"internal/runtime/sys"
    13  	"unsafe"
    14  )
    15  
    16  //go:linkname time_runtimeNow time.runtimeNow
    17  func time_runtimeNow() (sec int64, nsec int32, mono int64) {
    18  	if bubble := getg().bubble; bubble != nil {
    19  		sec = bubble.now / (1000 * 1000 * 1000)
    20  		nsec = int32(bubble.now % (1000 * 1000 * 1000))
    21  		// Don't return a monotonic time inside a synctest bubble.
    22  		// If we return a monotonic time based on the fake clock,
    23  		// arithmetic on times created inside/outside bubbles is confusing.
    24  		// If we return a monotonic time based on the real monotonic clock,
    25  		// arithmetic on times created in the same bubble is confusing.
    26  		// Simplest is to omit the monotonic time within a bubble.
    27  		return sec, nsec, 0
    28  	}
    29  	return time_now()
    30  }
    31  
    32  //go:linkname time_runtimeNano time.runtimeNano
    33  func time_runtimeNano() int64 {
    34  	gp := getg()
    35  	if gp.bubble != nil {
    36  		return gp.bubble.now
    37  	}
    38  	return nanotime()
    39  }
    40  
    41  //go:linkname time_runtimeIsBubbled time.runtimeIsBubbled
    42  func time_runtimeIsBubbled() bool {
    43  	return getg().bubble != nil
    44  }
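
// For illustration, a user-level sketch of the bubble semantics implemented by
// the hooks above. It assumes the experimental testing/synctest package
// (synctest.Run in Go 1.24; later releases expose synctest.Test instead), so
// the exact API may differ:
//
//	package main
//
//	import (
//		"fmt"
//		"testing/synctest" // experimental; may require GOEXPERIMENT=synctest
//		"time"
//	)
//
//	func main() {
//		synctest.Run(func() {
//			t0 := time.Now()            // fake clock; monotonic reading omitted (time_runtimeNow)
//			time.Sleep(5 * time.Second) // advances the bubble's fake clock, not real time
//			fmt.Println(time.Since(t0)) // exactly 5s, computed from the wall clock alone
//		})
//	}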
    45  
    46  // A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
    47  // Timers are allocated by client code, often as part of other data structures.
    48  // Each P has a heap of pointers to timers that it manages.
    49  //
    50  // A timer is expected to be used by only one client goroutine at a time,
    51  // but there will be concurrent access by the P managing that timer.
    52  // Timer accesses are protected by the lock t.mu, with a snapshot of
    53  // t's state bits published in t.astate to enable certain fast paths to make
    54  // decisions about a timer without acquiring the lock.
    55  type timer struct {
    56  	// mu protects reads and writes to all fields, with exceptions noted below.
    57  	mu mutex
    58  
    59  	astate atomic.Uint8 // atomic copy of state bits at last unlock
    60  	state  uint8        // state bits
    61  	isChan bool         // timer has a channel; immutable; can be read without lock
    62  	isFake bool         // timer is using fake time; immutable; can be read without lock
    63  
    64  	blocked uint32 // number of goroutines blocked on timer's channel
    65  	rand    uint32 // randomizes order of timers at same instant; only set when isFake
    66  
    67  	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
    68  	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
    69  	// a well-behaved function and not block.
    70  	//
    71  	// The arg and seq are client-specified opaque arguments passed back to f.
    72  	// When used from netpoll, arg and seq have meanings defined by netpoll
    73  	// and are completely opaque to this code; in that context, seq is a sequence
    74  	// number to recognize and squelch stale function invocations.
    75  	// When used from package time, arg is a channel (for After, NewTicker)
    76  	// or the function to call (for AfterFunc) and seq is unused (0).
    77  	//
    78  	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
    79  	// this file uses t.seq as a sequence number to recognize and squelch
    80  	// sends that correspond to an earlier (stale) timer configuration,
    81  	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
    82  	// writes to seq are protected by both t.mu and t.sendLock,
    83  	// so reads are allowed when holding either of the two mutexes.
    84  	//
    85  	// The delay argument is nanotime() - t.when, meaning the delay in ns between
    86  	// when the timer should have gone off and now. Normally that amount is
    87  	// small enough not to matter, but for channel timers that are fed lazily,
    88  	// the delay can be arbitrarily long; package time subtracts it out to make
    89  	// it look like the send happened earlier than it actually did.
    90  	// (No one has looked at the channel since then, or the send would not
    91  	// have happened so late, so no one can tell the difference.)
    92  	when   int64
    93  	period int64
    94  	f      func(arg any, seq uintptr, delay int64)
    95  	arg    any
    96  	seq    uintptr
    97  
    98  	// If non-nil, the timers containing t.
    99  	ts *timers
   100  
   101  	// sendLock protects sends on the timer's channel.
   102  	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
   103  	sendLock mutex
   104  
   105  	// isSending is used to handle races between running a
   106  	// channel timer and stopping or resetting the timer.
   107  	// It is used only for channel timers (t.isChan == true).
   108  	// It is not used for tickers.
   109  	// The value is incremented when about to send a value on the channel,
   110  	// and decremented after sending the value.
   111  	// The stop/reset code uses this to detect whether it
   112  	// stopped the channel send.
   113  	//
   114  	// isSending is incremented only when t.mu is held.
   115  	// isSending is decremented only when t.sendLock is held.
   116  	// isSending is read only when both t.mu and t.sendLock are held.
   117  	isSending atomic.Int32
   118  }
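
// For orientation, a user-level sketch (exported time API only) of the two
// package-time uses of f, arg, and seq described above; the details of the
// callbacks package time installs are assumptions here, not guaranteed by
// this file:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		// AfterFunc: arg carries the callback and seq is unused (0);
//		// the runtime's call to f(arg, seq, delay) ends up starting the callback.
//		t := time.AfterFunc(10*time.Millisecond, func() { fmt.Println("fired") })
//		defer t.Stop()
//
//		// NewTicker: arg carries the channel; f sends the (delay-adjusted)
//		// time on it once per period.
//		tick := time.NewTicker(20 * time.Millisecond)
//		defer tick.Stop()
//		fmt.Println(<-tick.C)
//
//		time.Sleep(30 * time.Millisecond) // give the AfterFunc callback time to run
//	}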
   119  
   120  // init initializes a newly allocated timer t.
   121  // Any code that allocates a timer must call t.init before using it.
   122  // The arg and f can be set during init, or they can be nil in init
   123  // and set by a future call to t.modify.
   124  func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
   125  	lockInit(&t.mu, lockRankTimer)
   126  	t.f = f
   127  	t.arg = arg
   128  }
   129  
   130  // A timers is a per-P set of timers.
   131  type timers struct {
   132  	// mu protects timers; timers are per-P, but the scheduler can
   133  	// access the timers of another P, so we have to lock.
   134  	mu mutex
   135  
   136  	// heap is the set of timers, ordered by heap[i].when.
   137  	// Must hold lock to access.
   138  	heap []timerWhen
   139  
   140  	// len is an atomic copy of len(heap).
   141  	len atomic.Uint32
   142  
   143  	// zombies is the number of timers in the heap
   144  	// that are marked for removal.
   145  	zombies atomic.Int32
   146  
   147  	// raceCtx is the race context used while executing timer functions.
   148  	raceCtx uintptr
   149  
   150  	// minWhenHeap is the minimum heap[i].when value (= heap[0].when).
   151  	// The wakeTime method uses minWhenHeap and minWhenModified
   152  	// to determine the next wake time.
   153  	// If minWhenHeap = 0, it means there are no timers in the heap.
   154  	minWhenHeap atomic.Int64
   155  
   156  	// minWhenModified is a lower bound on the minimum
   157  	// heap[i].when over timers with the timerModified bit set.
   158  	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
   159  	minWhenModified atomic.Int64
   160  }
   161  
   162  type timerWhen struct {
   163  	timer *timer
   164  	when  int64
   165  }
   166  
   167  // less reports whether tw is less than other.
   168  func (tw timerWhen) less(other timerWhen) bool {
   169  	switch {
   170  	case tw.when < other.when:
   171  		return true
   172  	case tw.when > other.when:
   173  		return false
   174  	default:
   175  		// When timers wake at the same time, use a per-timer random value to order them.
   176  		// We only set the random value for timers using fake time, since there's
   177  		// no practical way to schedule real-time timers for the same instant.
   178  		return tw.timer.rand < other.timer.rand
   179  	}
   180  }
   181  
   182  func (ts *timers) lock() {
   183  	lock(&ts.mu)
   184  }
   185  
   186  func (ts *timers) unlock() {
   187  	// Update atomic copy of len(ts.heap).
   188  	// We only update at unlock so that the len is always
   189  	// the most recent unlocked length, not an ephemeral length.
   190  	// This matters if we lock ts, delete the only timer from the heap,
   191  	// add it back, and unlock. We want ts.len.Load to return 1 the
   192  	// entire time, never 0. This is important for pidleput deciding
   193  	// whether ts is empty.
   194  	ts.len.Store(uint32(len(ts.heap)))
   195  
   196  	unlock(&ts.mu)
   197  }
   198  
   199  // Timer state field.
   200  const (
   201  	// timerHeaped is set when the timer is stored in some P's heap.
   202  	timerHeaped uint8 = 1 << iota
   203  
   204  	// timerModified is set when t.when has been modified
   205  	// but the heap's heap[i].when entry still needs to be updated.
   206  	// That change waits until the heap in which
   207  	// the timer appears can be locked and rearranged.
   208  	// timerModified is only set when timerHeaped is also set.
   209  	timerModified
   210  
   211  	// timerZombie is set when the timer has been stopped
   212  	// but is still present in some P's heap.
   213  	// Only set when timerHeaped is also set.
   214  	// It is possible for timerModified and timerZombie to both
   215  	// be set, meaning that the timer was modified and then stopped.
   216  	// A timer sending to a channel may be placed in timerZombie
   217  	// to take it out of the heap even though the timer is not stopped,
   218  	// as long as nothing is reading from the channel.
   219  	timerZombie
   220  )
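
// For reference, a summary (derived from the rules stated above, not an
// independent invariant) of the combinations a timer's state can hold:
//
//	var s uint8
//	s = 0                                         // not in any heap
//	s = timerHeaped                               // in a heap; heap[i].when matches t.when
//	s = timerHeaped | timerModified               // in a heap; heap[i].when is stale
//	s = timerHeaped | timerZombie                 // in a heap; marked for removal
//	s = timerHeaped | timerModified | timerZombie // modified, then stopped
//	_ = s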
   221  
   222  // timerDebug enables printing a textual debug trace of all timer operations to stderr.
   223  const timerDebug = false
   224  
   225  func (t *timer) trace(op string) {
   226  	if timerDebug {
   227  		t.trace1(op)
   228  	}
   229  }
   230  
   231  func (t *timer) trace1(op string) {
   232  	if !timerDebug {
   233  		return
   234  	}
   235  	bits := [4]string{"h", "m", "z", "c"}
   236  	for i := range 3 {
   237  		if t.state&(1<<i) == 0 {
   238  			bits[i] = "-"
   239  		}
   240  	}
   241  	if !t.isChan {
   242  		bits[3] = "-"
   243  	}
   244  	print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n")
   245  }
   246  
   247  func (ts *timers) trace(op string) {
   248  	if timerDebug {
   249  		println("TS", ts, op)
   250  	}
   251  }
   252  
   253  // lock locks the timer, allowing reading or writing any of the timer fields.
   254  func (t *timer) lock() {
   255  	lock(&t.mu)
   256  	t.trace("lock")
   257  }
   258  
   259  // unlock updates t.astate and unlocks the timer.
   260  func (t *timer) unlock() {
   261  	t.trace("unlock")
   262  	// Let heap fast paths know whether heap[i].when is accurate.
   263  	// Also let maybeRunChan know whether channel is in heap.
   264  	t.astate.Store(t.state)
   265  	unlock(&t.mu)
   266  }
   267  
   268  // hchan returns the channel in t.arg.
   269  // t must be a timer with a channel.
   270  func (t *timer) hchan() *hchan {
   271  	if !t.isChan {
   272  		badTimer()
   273  	}
   274  	// Note: t.arg is a chan time.Time,
   275  	// and runtime cannot refer to that type,
   276  	// so we cannot use a type assertion.
   277  	return (*hchan)(efaceOf(&t.arg).data)
   278  }
   279  
   280  // updateHeap updates t as directed by t.state, updating t.state
   281  // and returning a bool indicating whether the state (and ts.heap[0].when) changed.
   282  // The caller must hold t's lock, or the world can be stopped instead.
   283  // The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
   284  // takes care of moving t within the timers heap to preserve the heap invariants.
    285  // (updateHeap throws, via badTimer, if t.ts is nil or t is not at the top of
    286  // the heap, so it must not be called for a timer outside its heap.)
   287  func (t *timer) updateHeap() (updated bool) {
   288  	assertWorldStoppedOrLockHeld(&t.mu)
   289  	t.trace("updateHeap")
   290  	ts := t.ts
   291  	if ts == nil || t != ts.heap[0].timer {
   292  		badTimer()
   293  	}
   294  	assertLockHeld(&ts.mu)
   295  	if t.state&timerZombie != 0 {
   296  		// Take timer out of heap.
   297  		t.state &^= timerHeaped | timerZombie | timerModified
   298  		ts.zombies.Add(-1)
   299  		ts.deleteMin()
   300  		return true
   301  	}
   302  
   303  	if t.state&timerModified != 0 {
   304  		// Update ts.heap[0].when and move within heap.
   305  		t.state &^= timerModified
   306  		ts.heap[0].when = t.when
   307  		ts.siftDown(0)
   308  		ts.updateMinWhenHeap()
   309  		return true
   310  	}
   311  
   312  	return false
   313  }
   314  
   315  // maxWhen is the maximum value for timer's when field.
   316  const maxWhen = 1<<63 - 1
   317  
   318  // verifyTimers can be set to true to add debugging checks that the
   319  // timer heaps are valid.
   320  const verifyTimers = false
   321  
   322  // Package time APIs.
   323  // Godoc uses the comments in package time, not these.
   324  
   325  // time.now is implemented in assembly.
   326  
   327  // timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
   328  //
   329  //go:linkname timeSleep time.Sleep
   330  func timeSleep(ns int64) {
   331  	if ns <= 0 {
   332  		return
   333  	}
   334  
   335  	gp := getg()
   336  	t := gp.timer
   337  	if t == nil {
   338  		t = new(timer)
   339  		t.init(goroutineReady, gp)
   340  		if gp.bubble != nil {
   341  			t.isFake = true
   342  		}
   343  		gp.timer = t
   344  	}
   345  	var now int64
   346  	if bubble := gp.bubble; bubble != nil {
   347  		now = bubble.now
   348  	} else {
   349  		now = nanotime()
   350  	}
   351  	when := now + ns
   352  	if when < 0 { // check for overflow.
   353  		when = maxWhen
   354  	}
   355  	gp.sleepWhen = when
   356  	if t.isFake {
   357  		// Call timer.reset in this goroutine, since it's the one in a bubble.
   358  		// We don't need to worry about the timer function running before the goroutine
   359  		// is parked, because time won't advance until we park.
   360  		resetForSleep(gp, nil)
   361  		gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1)
   362  	} else {
   363  		gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
   364  	}
   365  }
   366  
   367  // resetForSleep is called after the goroutine is parked for timeSleep.
   368  // We can't call timer.reset in timeSleep itself because if this is a short
   369  // sleep and there are many goroutines then the P can wind up running the
   370  // timer function, goroutineReady, before the goroutine has been parked.
   371  func resetForSleep(gp *g, _ unsafe.Pointer) bool {
   372  	gp.timer.reset(gp.sleepWhen, 0)
   373  	return true
   374  }
   375  
   376  // A timeTimer is a runtime-allocated time.Timer or time.Ticker
   377  // with the additional runtime state following it.
   378  // The runtime state is inaccessible to package time.
   379  type timeTimer struct {
   380  	c    unsafe.Pointer // <-chan time.Time
   381  	init bool
   382  	timer
   383  }
   384  
   385  // newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
   386  // with the given parameters.
   387  //
   388  //go:linkname newTimer time.newTimer
   389  func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
   390  	t := new(timeTimer)
   391  	t.timer.init(nil, nil)
   392  	t.trace("new")
   393  	if raceenabled {
   394  		racerelease(unsafe.Pointer(&t.timer))
   395  	}
   396  	if c != nil {
   397  		lockInit(&t.sendLock, lockRankTimerSend)
   398  		t.isChan = true
   399  		c.timer = &t.timer
   400  		if c.dataqsiz == 0 {
   401  			throw("invalid timer channel: no capacity")
   402  		}
   403  	}
   404  	if bubble := getg().bubble; bubble != nil {
   405  		t.isFake = true
   406  	}
   407  	t.modify(when, period, f, arg, 0)
   408  	t.init = true
   409  	return t
   410  }
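
// For illustration, a user-level sketch of the channel timers this constructor
// backs. The cap/len observations describe the synchronous (Go 1.23+) channel
// semantics implemented in this file; GODEBUG=asynctimerchan=1 selects the old
// buffered behavior instead:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		t := time.NewTimer(10 * time.Millisecond) // reaches newTimer above with the channel as c
//		fmt.Println(cap(t.C), len(t.C))           // 0 0: presented to users as unbuffered,
//		                                          // even though the hchan itself must have
//		                                          // capacity (see the dataqsiz check above)
//		<-t.C
//	}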
   411  
   412  // stopTimer stops a timer.
   413  // It reports whether t was stopped before being run.
   414  //
   415  //go:linkname stopTimer time.stopTimer
   416  func stopTimer(t *timeTimer) bool {
   417  	if t.isFake && getg().bubble == nil {
   418  		fatal("stop of synctest timer from outside bubble")
   419  	}
   420  	return t.stop()
   421  }
   422  
   423  // resetTimer resets an inactive timer, adding it to the timer heap.
   424  //
   425  // Reports whether the timer was modified before it was run.
   426  //
   427  //go:linkname resetTimer time.resetTimer
   428  func resetTimer(t *timeTimer, when, period int64) bool {
   429  	if raceenabled {
   430  		racerelease(unsafe.Pointer(&t.timer))
   431  	}
   432  	if t.isFake && getg().bubble == nil {
   433  		fatal("reset of synctest timer from outside bubble")
   434  	}
   435  	return t.reset(when, period)
   436  }
   437  
   438  // Go runtime.
   439  
   440  // Ready the goroutine arg.
   441  func goroutineReady(arg any, _ uintptr, _ int64) {
   442  	goready(arg.(*g), 0)
   443  }
   444  
   445  // addHeap adds t to the timers heap.
   446  // The caller must hold ts.lock or the world must be stopped.
   447  // The caller must also have checked that t belongs in the heap.
   448  // Callers that are not sure can call t.maybeAdd instead,
   449  // but note that maybeAdd has different locking requirements.
   450  func (ts *timers) addHeap(t *timer) {
   451  	assertWorldStoppedOrLockHeld(&ts.mu)
   452  	// Timers rely on the network poller, so make sure the poller
   453  	// has started.
   454  	if netpollInited.Load() == 0 {
   455  		netpollGenericInit()
   456  	}
   457  
   458  	if t.ts != nil {
   459  		throw("ts set in timer")
   460  	}
   461  	t.ts = ts
   462  	ts.heap = append(ts.heap, timerWhen{t, t.when})
   463  	ts.siftUp(len(ts.heap) - 1)
   464  	if t == ts.heap[0].timer {
   465  		ts.updateMinWhenHeap()
   466  	}
   467  }
   468  
   469  // maybeRunAsync checks whether t needs to be triggered and runs it if so.
   470  // The caller is responsible for locking the timer and for checking that we
   471  // are running timers in async mode. If the timer needs to be run,
   472  // maybeRunAsync will unlock and re-lock it.
   473  // The timer is always locked on return.
   474  func (t *timer) maybeRunAsync() {
   475  	assertLockHeld(&t.mu)
   476  	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
   477  		// If timer should have triggered already (but nothing looked at it yet),
   478  		// trigger now, so that a receive after the stop sees the "old" value
   479  		// that should be there.
   480  		// (It is possible to have t.blocked > 0 if there is a racing receive
   481  		// in blockTimerChan, but timerHeaped not being set means
   482  		// it hasn't run t.maybeAdd yet; in that case, running the
   483  		// timer ourselves now is fine.)
   484  		if now := nanotime(); t.when <= now {
   485  			systemstack(func() {
   486  				t.unlockAndRun(now, nil) // resets t.when
   487  			})
   488  			t.lock()
   489  		}
   490  	}
   491  }
   492  
   493  // stop stops the timer t. It may be on some other P, so we can't
   494  // actually remove it from the timers heap. We can only mark it as stopped.
   495  // It will be removed in due course by the P whose heap it is on.
   496  // Reports whether the timer was stopped before it was run.
   497  func (t *timer) stop() bool {
   498  	async := debug.asynctimerchan.Load() != 0
   499  	if !async && t.isChan {
   500  		lock(&t.sendLock)
   501  	}
   502  
   503  	t.lock()
   504  	t.trace("stop")
   505  	if async {
   506  		t.maybeRunAsync()
   507  	}
   508  	if t.state&timerHeaped != 0 {
   509  		t.state |= timerModified
   510  		if t.state&timerZombie == 0 {
   511  			t.state |= timerZombie
   512  			t.ts.zombies.Add(1)
   513  		}
   514  	}
   515  	pending := t.when > 0
   516  	t.when = 0
   517  
   518  	if !async && t.isChan {
   519  		// Stop any future sends with stale values.
   520  		// See timer.unlockAndRun.
   521  		t.seq++
   522  
   523  		// If there is currently a send in progress,
   524  		// incrementing seq is going to prevent that
   525  		// send from actually happening. That means
   526  		// that we should return true: the timer was
   527  		// stopped, even though t.when may be zero.
   528  		if t.period == 0 && t.isSending.Load() > 0 {
   529  			pending = true
   530  		}
   531  	}
   532  	t.unlock()
   533  	if !async && t.isChan {
   534  		unlock(&t.sendLock)
   535  		if timerchandrain(t.hchan()) {
   536  			pending = true
   537  		}
   538  	}
   539  
   540  	return pending
   541  }
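
// For context, the user-visible consequence of the synchronous path above
// (sendLock + seq + timerchandrain): after Stop returns, no stale value will be
// received, so the old drain idiom is only needed for the async
// (GODEBUG=asynctimerchan=1) behavior. A sketch:
//
//	package main
//
//	import "time"
//
//	func main() {
//		t := time.NewTimer(time.Hour)
//		if !t.Stop() {
//			// Pre-Go 1.23 style code drained a possibly buffered stale value here:
//			//	<-t.C
//			// With the synchronous behavior implemented in this file, Stop
//			// already guarantees no stale value will be received.
//		}
//		t.Reset(time.Minute)
//	}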
   542  
   543  // deleteMin removes timer 0 from ts.
   544  // ts must be locked.
   545  func (ts *timers) deleteMin() {
   546  	assertLockHeld(&ts.mu)
   547  	t := ts.heap[0].timer
   548  	if t.ts != ts {
   549  		throw("wrong timers")
   550  	}
   551  	t.ts = nil
   552  	last := len(ts.heap) - 1
   553  	if last > 0 {
   554  		ts.heap[0] = ts.heap[last]
   555  	}
   556  	ts.heap[last] = timerWhen{}
   557  	ts.heap = ts.heap[:last]
   558  	if last > 0 {
   559  		ts.siftDown(0)
   560  	}
   561  	ts.updateMinWhenHeap()
   562  	if last == 0 {
   563  		// If there are no timers, then clearly there are no timerModified timers.
   564  		ts.minWhenModified.Store(0)
   565  	}
   566  }
   567  
   568  // modify modifies an existing timer.
   569  // This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
   570  // Reports whether the timer was modified before it was run.
   571  // If f == nil, then t.f, t.arg, and t.seq are not modified.
   572  func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
   573  	if when <= 0 {
   574  		throw("timer when must be positive")
   575  	}
   576  	if period < 0 {
   577  		throw("timer period must be non-negative")
   578  	}
   579  	async := debug.asynctimerchan.Load() != 0
   580  
   581  	if !async && t.isChan {
   582  		lock(&t.sendLock)
   583  	}
   584  
   585  	t.lock()
   586  	if async {
   587  		t.maybeRunAsync()
   588  	}
   589  	t.trace("modify")
   590  	oldPeriod := t.period
   591  	t.period = period
   592  	if f != nil {
   593  		t.f = f
   594  		t.arg = arg
   595  		t.seq = seq
   596  	}
   597  
   598  	wake := false
   599  	pending := t.when > 0
   600  	t.when = when
   601  	if t.state&timerHeaped != 0 {
   602  		t.state |= timerModified
   603  		if t.state&timerZombie != 0 {
   604  			// In the heap but marked for removal (by a Stop).
   605  			// Unmark it, since it has been Reset and will be running again.
   606  			t.ts.zombies.Add(-1)
   607  			t.state &^= timerZombie
   608  		}
   609  		// The corresponding heap[i].when is updated later.
   610  		// See comment in type timer above and in timers.adjust below.
   611  		if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
   612  			wake = true
   613  			// Force timerModified bit out to t.astate before updating t.minWhenModified,
   614  			// to synchronize with t.ts.adjust. See comment in adjust.
   615  			t.astate.Store(t.state)
   616  			t.ts.updateMinWhenModified(when)
   617  		}
   618  	}
   619  
   620  	add := t.needsAdd()
   621  
   622  	if add && t.isFake {
   623  		// If this is a bubbled timer scheduled to fire immediately,
   624  		// run it now rather than waiting for the bubble's timer scheduler.
   625  		// This avoids deferring timer execution until after the bubble
   626  		// becomes durably blocked.
   627  		//
   628  		// Don't do this for non-bubbled timers: It isn't necessary,
   629  		// and there may be cases where the runtime executes timers with
   630  		// the expectation the timer func will not run in the current goroutine.
   631  		// Bubbled timers are always created by the time package, and are
   632  		// safe to run in the current goroutine.
   633  		bubble := getg().bubble
   634  		if bubble == nil {
   635  			throw("fake timer executing with no bubble")
   636  		}
   637  		if t.state&timerHeaped == 0 && when <= bubble.now {
   638  			systemstack(func() {
   639  				if !async && t.isChan {
   640  					unlock(&t.sendLock)
   641  				}
   642  				t.unlockAndRun(bubble.now, bubble)
   643  			})
   644  			return pending
   645  		}
   646  	}
   647  
   648  	if !async && t.isChan {
   649  		// Stop any future sends with stale values.
   650  		// See timer.unlockAndRun.
   651  		t.seq++
   652  
   653  		// If there is currently a send in progress,
   654  		// incrementing seq is going to prevent that
   655  		// send from actually happening. That means
   656  		// that we should return true: the timer was
   657  		// stopped, even though t.when may be zero.
   658  		if oldPeriod == 0 && t.isSending.Load() > 0 {
   659  			pending = true
   660  		}
   661  	}
   662  	t.unlock()
   663  	if !async && t.isChan {
   664  		if timerchandrain(t.hchan()) {
   665  			pending = true
   666  		}
   667  		unlock(&t.sendLock)
   668  	}
   669  
   670  	if add {
   671  		t.maybeAdd()
   672  	}
   673  	if wake {
   674  		wakeNetPoller(when)
   675  	}
   676  
   677  	return pending
   678  }
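
// For context, the pending result computed above is what time.Timer.Reset and
// time.Ticker.Reset report to users. A small sketch:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		t := time.NewTimer(time.Hour)
//		fmt.Println(t.Reset(10 * time.Millisecond)) // true: the timer was still pending
//		<-t.C
//		fmt.Println(t.Reset(10 * time.Millisecond)) // false: it had already fired
//		<-t.C
//	}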
   679  
   680  // needsAdd reports whether t needs to be added to a timers heap.
   681  // t must be locked.
   682  func (t *timer) needsAdd() bool {
   683  	assertLockHeld(&t.mu)
   684  	need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0)
   685  	if need {
   686  		t.trace("needsAdd+")
   687  	} else {
   688  		t.trace("needsAdd-")
   689  	}
   690  	return need
   691  }
   692  
   693  // maybeAdd adds t to the local timers heap if it needs to be in a heap.
   694  // The caller must not hold t's lock nor any timers heap lock.
   695  // The caller probably just unlocked t, but that lock must be dropped
   696  // in order to acquire a ts.lock, to avoid lock inversions.
   697  // (timers.adjust holds ts.lock while acquiring each t's lock,
   698  // so we cannot hold any t's lock while acquiring ts.lock).
   699  //
   700  // Strictly speaking it *might* be okay to hold t.lock and
   701  // acquire ts.lock at the same time, because we know that
   702  // t is not in any ts.heap, so nothing holding a ts.lock would
   703  // be acquiring the t.lock at the same time, meaning there
   704  // isn't a possible deadlock. But it is easier and safer not to be
   705  // too clever and respect the static ordering.
   706  // (If we don't, we have to change the static lock checking of t and ts.)
   707  //
   708  // Concurrent calls to time.Timer.Reset or blockTimerChan
   709  // may result in concurrent calls to t.maybeAdd,
   710  // so we cannot assume that t is not in a heap on entry to t.maybeAdd.
   711  func (t *timer) maybeAdd() {
   712  	// Note: Not holding any locks on entry to t.maybeAdd,
   713  	// so the current g can be rescheduled to a different M and P
   714  	// at any time, including between the ts := assignment and the
   715  	// call to ts.lock. If a reschedule happened then, we would be
   716  	// adding t to some other P's timers, perhaps even a P that the scheduler
   717  	// has marked as idle with no timers, in which case the timer could
   718  	// go unnoticed until long after t.when.
   719  	// Calling acquirem instead of using getg().m makes sure that
   720  	// we end up locking and inserting into the current P's timers.
   721  	mp := acquirem()
   722  	var ts *timers
   723  	if t.isFake {
   724  		bubble := getg().bubble
   725  		if bubble == nil {
   726  			throw("invalid timer: fake time but no syncgroup")
   727  		}
   728  		ts = &bubble.timers
   729  	} else {
   730  		ts = &mp.p.ptr().timers
   731  	}
   732  	ts.lock()
   733  	ts.cleanHead()
   734  	t.lock()
   735  	t.trace("maybeAdd")
   736  	when := int64(0)
   737  	wake := false
   738  	if t.needsAdd() {
   739  		if t.isFake {
   740  			// Re-randomize timer order.
   741  			// We could do this for all timers, but unbubbled timers are highly
   742  			// unlikely to have the same when.
   743  			t.rand = cheaprand()
   744  		}
   745  		t.state |= timerHeaped
   746  		when = t.when
   747  		wakeTime := ts.wakeTime()
   748  		wake = wakeTime == 0 || when < wakeTime
   749  		ts.addHeap(t)
   750  	}
   751  	t.unlock()
   752  	ts.unlock()
   753  	releasem(mp)
   754  	if wake {
   755  		wakeNetPoller(when)
   756  	}
   757  }
   758  
   759  // reset resets the time when a timer should fire.
   760  // If used for an inactive timer, the timer will become active.
   761  // Reports whether the timer was active and was stopped.
   762  func (t *timer) reset(when, period int64) bool {
   763  	return t.modify(when, period, nil, nil, 0)
   764  }
   765  
   766  // cleanHead cleans up the head of the timer queue. This speeds up
   767  // programs that create and delete timers; leaving them in the heap
   768  // slows down heap operations.
   769  // The caller must have locked ts.
   770  func (ts *timers) cleanHead() {
   771  	ts.trace("cleanHead")
   772  	assertLockHeld(&ts.mu)
   773  	gp := getg()
   774  	for {
   775  		if len(ts.heap) == 0 {
   776  			return
   777  		}
   778  
   779  		// This loop can theoretically run for a while, and because
    780  		// it is holding ts.mu it cannot be preempted.
   781  		// If someone is trying to preempt us, just return.
   782  		// We can clean the timers later.
   783  		if gp.preemptStop {
   784  			return
   785  		}
   786  
   787  		// Delete zombies from tail of heap. It requires no heap adjustments at all,
   788  		// and doing so increases the chances that when we swap out a zombie
   789  		// in heap[0] for the tail of the heap, we'll get a non-zombie timer,
   790  		// shortening this loop.
   791  		n := len(ts.heap)
   792  		if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 {
   793  			t.lock()
   794  			if t.state&timerZombie != 0 {
   795  				t.state &^= timerHeaped | timerZombie | timerModified
   796  				t.ts = nil
   797  				ts.zombies.Add(-1)
   798  				ts.heap[n-1] = timerWhen{}
   799  				ts.heap = ts.heap[:n-1]
   800  			}
   801  			t.unlock()
   802  			continue
   803  		}
   804  
   805  		t := ts.heap[0].timer
   806  		if t.ts != ts {
   807  			throw("bad ts")
   808  		}
   809  
   810  		if t.astate.Load()&(timerModified|timerZombie) == 0 {
   811  			// Fast path: head of timers does not need adjustment.
   812  			return
   813  		}
   814  
   815  		t.lock()
   816  		updated := t.updateHeap()
   817  		t.unlock()
   818  		if !updated {
   819  			// Head of timers does not need adjustment.
   820  			return
   821  		}
   822  	}
   823  }
   824  
   825  // take moves any timers from src into ts
   826  // and then clears the timer state from src,
   827  // because src is being destroyed.
   828  // The caller must not have locked either timers.
   829  // For now this is only called when the world is stopped.
   830  func (ts *timers) take(src *timers) {
   831  	ts.trace("take")
   832  	assertWorldStopped()
   833  	if len(src.heap) > 0 {
   834  		// The world is stopped, so we ignore the locking of ts and src here.
   835  		// That would introduce a sched < timers lock ordering,
   836  		// which we'd rather avoid in the static ranking.
   837  		for _, tw := range src.heap {
   838  			t := tw.timer
   839  			t.ts = nil
   840  			if t.state&timerZombie != 0 {
   841  				t.state &^= timerHeaped | timerZombie | timerModified
   842  			} else {
   843  				t.state &^= timerModified
   844  				ts.addHeap(t)
   845  			}
   846  		}
   847  		src.heap = nil
   848  		src.zombies.Store(0)
   849  		src.minWhenHeap.Store(0)
   850  		src.minWhenModified.Store(0)
   851  		src.len.Store(0)
   852  		ts.len.Store(uint32(len(ts.heap)))
   853  	}
   854  }
   855  
   856  // adjust looks through the timers in ts.heap for
   857  // any timers that have been modified to run earlier, and puts them in
   858  // the correct place in the heap. While looking for those timers,
   859  // it also moves timers that have been modified to run later,
   860  // and removes deleted timers. The caller must have locked ts.
   861  func (ts *timers) adjust(now int64, force bool) {
   862  	ts.trace("adjust")
   863  	assertLockHeld(&ts.mu)
   864  	// If we haven't yet reached the time of the earliest modified
   865  	// timer, don't do anything. This speeds up programs that adjust
   866  	// a lot of timers back and forth if the timers rarely expire.
   867  	// We'll postpone looking through all the adjusted timers until
   868  	// one would actually expire.
   869  	if !force {
   870  		first := ts.minWhenModified.Load()
   871  		if first == 0 || first > now {
   872  			if verifyTimers {
   873  				ts.verify()
   874  			}
   875  			return
   876  		}
   877  	}
   878  
   879  	// minWhenModified is a lower bound on the earliest t.when
   880  	// among the timerModified timers. We want to make it more precise:
   881  	// we are going to scan the heap and clean out all the timerModified bits,
   882  	// at which point minWhenModified can be set to 0 (indicating none at all).
   883  	//
   884  	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
   885  	// keep ts.wakeTime returning an accurate value throughout this entire process.
   886  	//
   887  	// Setting minWhenModified = 0 *before* the scan could make wakeTime
   888  	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
   889  	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
   890  	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
   891  	//
   892  	// Setting minWhenModified = 0 *after* the scan could result in missing
   893  	// concurrent timer modifications in other goroutines; those will lock
   894  	// the specific timer, set the timerModified bit, and set t.when.
   895  	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
   896  	//
   897  	// The way out of this dilemma is to preserve wakeTime a different way.
   898  	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
   899  	// is protected by ts.lock, which we hold, so we can modify it however we like
   900  	// in service of keeping wakeTime accurate.
   901  	//
   902  	// So we can:
   903  	//
   904  	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
   905  	//	2. Set minWhenModified = 0
   906  	//	   (Other goroutines may modify timers and update minWhenModified now.)
   907  	//	3. Scan timers
   908  	//	4. Set minWhenHeap = heap[0].when
   909  	//
   910  	// That order preserves a correct value of wakeTime throughout the entire
   911  	// operation:
   912  	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
   913  	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
   914  	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
   915  	// Step 4 corrects minWhenHeap to a precise value.
   916  	//
   917  	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
   918  	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
   919  	// follows will include the information that was zeroed out of it.
   920  	//
   921  	// Originally Step 3 locked every timer, which made sure any timer update that was
   922  	// already in progress during Steps 1+2 completed and was observed by Step 3.
   923  	// All that locking was too expensive, so now we do an atomic load of t.astate to
   924  	// decide whether we need to do a full lock. To make sure that we still observe any
   925  	// timer update already in progress during Steps 1+2, t.modify sets timerModified
   926  	// in t.astate *before* calling t.updateMinWhenModified. That ensures that the
   927  	// overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3
   928  	// will see the timerModified and do a full lock.
   929  	ts.minWhenHeap.Store(ts.wakeTime())
   930  	ts.minWhenModified.Store(0)
   931  
   932  	changed := false
   933  	for i := 0; i < len(ts.heap); i++ {
   934  		tw := &ts.heap[i]
   935  		t := tw.timer
   936  		if t.ts != ts {
   937  			throw("bad ts")
   938  		}
   939  
   940  		if t.astate.Load()&(timerModified|timerZombie) == 0 {
   941  			// Does not need adjustment.
   942  			continue
   943  		}
   944  
   945  		t.lock()
   946  		switch {
   947  		case t.state&timerHeaped == 0:
   948  			badTimer()
   949  
   950  		case t.state&timerZombie != 0:
   951  			ts.zombies.Add(-1)
   952  			t.state &^= timerHeaped | timerZombie | timerModified
   953  			n := len(ts.heap)
   954  			ts.heap[i] = ts.heap[n-1]
   955  			ts.heap[n-1] = timerWhen{}
   956  			ts.heap = ts.heap[:n-1]
   957  			t.ts = nil
   958  			i--
   959  			changed = true
   960  
   961  		case t.state&timerModified != 0:
   962  			tw.when = t.when
   963  			t.state &^= timerModified
   964  			changed = true
   965  		}
   966  		t.unlock()
   967  	}
   968  
   969  	if changed {
   970  		ts.initHeap()
   971  	}
   972  	ts.updateMinWhenHeap()
   973  
   974  	if verifyTimers {
   975  		ts.verify()
   976  	}
   977  }
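
// A toy model (names and structure simplified; not the runtime's code) of the
// publication order argued for above: the writer folds minWhenModified into
// minWhenHeap before clearing it, and readers load minWhenModified before
// minWhenHeap, so a reader that sees the cleared value also sees the folded-in
// bound and wakeTime never drifts late during the scan.
//
//	package main
//
//	import "sync/atomic"
//
//	var minWhenHeap, minWhenModified atomic.Int64 // 0 means "none"
//
//	func adjustPrologue() {
//		// Step 1: lock in a conservative wake time.
//		if m := minWhenModified.Load(); m != 0 {
//			if h := minWhenHeap.Load(); h == 0 || m < h {
//				minWhenHeap.Store(m)
//			}
//		}
//		// Step 2: now the modified bound can be cleared for the scan.
//		minWhenModified.Store(0)
//	}
//
//	func wakeTime() int64 {
//		next := minWhenModified.Load() // must be loaded first
//		when := minWhenHeap.Load()
//		if when == 0 || (next != 0 && next < when) {
//			when = next
//		}
//		return when
//	}
//
//	func main() {
//		minWhenHeap.Store(100)
//		minWhenModified.Store(40)
//		adjustPrologue()
//		println(wakeTime()) // 40: the earlier deadline is never lost mid-scan
//	}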
   978  
   979  // wakeTime looks at ts's timers and returns the time when we
   980  // should wake up the netpoller. It returns 0 if there are no timers.
   981  // This function is invoked when dropping a P, so it must run without
   982  // any write barriers.
   983  //
   984  //go:nowritebarrierrec
   985  func (ts *timers) wakeTime() int64 {
   986  	// Note that the order of these two loads matters:
   987  	// adjust updates minWhen to make it safe to clear minNextWhen.
   988  	// We read minWhen after reading minNextWhen so that
   989  	// if we see a cleared minNextWhen, we are guaranteed to see
   990  	// the updated minWhen.
   991  	nextWhen := ts.minWhenModified.Load()
   992  	when := ts.minWhenHeap.Load()
   993  	if when == 0 || (nextWhen != 0 && nextWhen < when) {
   994  		when = nextWhen
   995  	}
   996  	return when
   997  }
   998  
   999  // check runs any timers in ts that are ready.
  1000  // If now is not 0, it is the current time.
  1001  // It returns the passed time (or the current time if now was passed as 0),
  1002  // the time when the next timer should run (or 0 if there is no next timer),
  1003  // and reports whether it ran any timers.
  1004  // If the time when the next timer should run is not 0,
  1005  // it is always larger than the returned time.
  1006  // We pass now in and out to avoid extra calls of nanotime.
  1007  //
  1008  //go:yeswritebarrierrec
  1009  func (ts *timers) check(now int64, bubble *synctestBubble) (rnow, pollUntil int64, ran bool) {
  1010  	ts.trace("check")
  1011  	// If it's not yet time for the first timer, or the first adjusted
  1012  	// timer, then there is nothing to do.
  1013  	next := ts.wakeTime()
  1014  	if next == 0 {
  1015  		// No timers to run or adjust.
  1016  		return now, 0, false
  1017  	}
  1018  
  1019  	if now == 0 {
  1020  		now = nanotime()
  1021  	}
  1022  
  1023  	// If this is the local P, and there are a lot of deleted timers,
  1024  	// clear them out. We only do this for the local P to reduce
  1025  	// lock contention on ts.mu.
  1026  	zombies := ts.zombies.Load()
  1027  	if zombies < 0 {
  1028  		badTimer()
  1029  	}
  1030  	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4
  1031  
  1032  	if now < next && !force {
  1033  		// Next timer is not ready to run, and we don't need to clear deleted timers.
  1034  		return now, next, false
  1035  	}
  1036  
  1037  	ts.lock()
  1038  	if len(ts.heap) > 0 {
  1039  		ts.adjust(now, false)
  1040  		for len(ts.heap) > 0 {
  1041  			// Note that ts.run may temporarily unlock ts.
  1042  			if tw := ts.run(now, bubble); tw != 0 {
  1043  				if tw > 0 {
  1044  					pollUntil = tw
  1045  				}
  1046  				break
  1047  			}
  1048  			ran = true
  1049  		}
  1050  
  1051  		// Note: Delaying the forced adjustment until after the ts.run
  1052  		// (as opposed to calling ts.adjust(now, force) above)
  1053  		// is significantly faster under contention, such as in
  1054  		// package time's BenchmarkTimerAdjust10000,
  1055  		// though we do not fully understand why.
  1056  		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
  1057  		if force {
  1058  			ts.adjust(now, true)
  1059  		}
  1060  	}
  1061  	ts.unlock()
  1062  
  1063  	return now, pollUntil, ran
  1064  }
  1065  
  1066  // run examines the first timer in ts. If it is ready based on now,
  1067  // it runs the timer and removes or updates it.
  1068  // Returns 0 if it ran a timer, -1 if there are no more timers, or the time
  1069  // when the first timer should run.
  1070  // The caller must have locked ts.
  1071  // If a timer is run, this will temporarily unlock ts.
  1072  //
  1073  //go:systemstack
  1074  func (ts *timers) run(now int64, bubble *synctestBubble) int64 {
  1075  	ts.trace("run")
  1076  	assertLockHeld(&ts.mu)
  1077  Redo:
  1078  	if len(ts.heap) == 0 {
  1079  		return -1
  1080  	}
  1081  	tw := ts.heap[0]
  1082  	t := tw.timer
  1083  	if t.ts != ts {
  1084  		throw("bad ts")
  1085  	}
  1086  
  1087  	if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now {
  1088  		// Fast path: not ready to run.
  1089  		return tw.when
  1090  	}
  1091  
  1092  	t.lock()
  1093  	if t.updateHeap() {
  1094  		t.unlock()
  1095  		goto Redo
  1096  	}
  1097  
  1098  	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
  1099  		badTimer()
  1100  	}
  1101  
  1102  	if t.when > now {
  1103  		// Not ready to run.
  1104  		t.unlock()
  1105  		return t.when
  1106  	}
  1107  
  1108  	t.unlockAndRun(now, bubble)
  1109  	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
  1110  	return 0
  1111  }
  1112  
  1113  // unlockAndRun unlocks and runs the timer t (which must be locked).
  1114  // If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
  1115  // and this call will temporarily unlock the timer set while running the timer function.
  1116  // unlockAndRun returns with t unlocked and t.ts (re-)locked.
  1117  //
  1118  //go:systemstack
  1119  func (t *timer) unlockAndRun(now int64, bubble *synctestBubble) {
  1120  	t.trace("unlockAndRun")
  1121  	assertLockHeld(&t.mu)
  1122  	if t.ts != nil {
  1123  		assertLockHeld(&t.ts.mu)
  1124  	}
  1125  	if raceenabled {
  1126  		// Note that we are running on a system stack,
  1127  		// so there is no chance of getg().m being reassigned
  1128  		// out from under us while this function executes.
  1129  		gp := getg()
  1130  		var tsLocal *timers
  1131  		if bubble == nil {
  1132  			tsLocal = &gp.m.p.ptr().timers
  1133  		} else {
  1134  			tsLocal = &bubble.timers
  1135  		}
  1136  		if tsLocal.raceCtx == 0 {
  1137  			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
  1138  		}
  1139  		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
  1140  	}
  1141  
  1142  	if t.state&(timerModified|timerZombie) != 0 {
  1143  		badTimer()
  1144  	}
  1145  
  1146  	f := t.f
  1147  	arg := t.arg
  1148  	seq := t.seq
  1149  	var next int64
  1150  	delay := now - t.when
  1151  	if t.period > 0 {
  1152  		// Leave in heap but adjust next time to fire.
  1153  		next = t.when + t.period*(1+delay/t.period)
  1154  		if next < 0 { // check for overflow.
  1155  			next = maxWhen
  1156  		}
  1157  	} else {
  1158  		next = 0
  1159  	}
  1160  	ts := t.ts
  1161  	t.when = next
  1162  	if t.state&timerHeaped != 0 {
  1163  		t.state |= timerModified
  1164  		if next == 0 {
  1165  			t.state |= timerZombie
  1166  			t.ts.zombies.Add(1)
  1167  		}
  1168  		t.updateHeap()
  1169  	}
  1170  
  1171  	async := debug.asynctimerchan.Load() != 0
  1172  	if !async && t.isChan && t.period == 0 {
  1173  		// Tell Stop/Reset that we are sending a value.
  1174  		if t.isSending.Add(1) < 0 {
  1175  			throw("too many concurrent timer firings")
  1176  		}
  1177  	}
  1178  
  1179  	t.unlock()
  1180  
  1181  	if raceenabled {
  1182  		// Temporarily use the current P's racectx for g0.
  1183  		gp := getg()
  1184  		if gp.racectx != 0 {
  1185  			throw("unexpected racectx")
  1186  		}
  1187  		if bubble == nil {
  1188  			gp.racectx = gp.m.p.ptr().timers.raceCtx
  1189  		} else {
  1190  			gp.racectx = bubble.timers.raceCtx
  1191  		}
  1192  	}
  1193  
  1194  	if ts != nil {
  1195  		ts.unlock()
  1196  	}
  1197  
  1198  	if bubble != nil {
  1199  		// Temporarily use the timer's bubble for the G running this timer.
  1200  		gp := getg()
  1201  		if gp.bubble != nil {
  1202  			throw("unexpected syncgroup set")
  1203  		}
  1204  		gp.bubble = bubble
  1205  		bubble.changegstatus(gp, _Gdead, _Grunning)
  1206  	}
  1207  
  1208  	if !async && t.isChan {
  1209  		// For a timer channel, we want to make sure that no stale sends
  1210  		// happen after a t.stop or t.modify, but we cannot hold t.mu
  1211  		// during the actual send (which f does) due to lock ordering.
  1212  		// It can happen that we are holding t's lock above, we decide
  1213  		// it's time to send a time value (by calling f), grab the parameters,
  1214  		// unlock above, and then a t.stop or t.modify changes the timer
  1215  		// and returns. At that point, the send needs not to happen after all.
  1216  		// The way we arrange for it not to happen is that t.stop and t.modify
  1217  		// both increment t.seq while holding both t.mu and t.sendLock.
  1218  		// We copied the seq value above while holding t.mu.
  1219  		// Now we can acquire t.sendLock (which will be held across the send)
  1220  		// and double-check that t.seq is still the seq value we saw above.
  1221  		// If not, the timer has been updated and we should skip the send.
  1222  		// We skip the send by reassigning f to a no-op function.
  1223  		//
  1224  		// The isSending field tells t.stop or t.modify that we have
  1225  		// started to send the value. That lets them correctly return
  1226  		// true meaning that no value was sent.
  1227  		lock(&t.sendLock)
  1228  
  1229  		if t.period == 0 {
  1230  			// We are committed to possibly sending a value
  1231  			// based on seq, so no need to keep telling
  1232  			// stop/modify that we are sending.
  1233  			if t.isSending.Add(-1) < 0 {
  1234  				throw("mismatched isSending updates")
  1235  			}
  1236  		}
  1237  
  1238  		if t.seq != seq {
  1239  			f = func(any, uintptr, int64) {}
  1240  		}
  1241  	}
  1242  
  1243  	f(arg, seq, delay)
  1244  
  1245  	if !async && t.isChan {
  1246  		unlock(&t.sendLock)
  1247  	}
  1248  
  1249  	if bubble != nil {
  1250  		gp := getg()
  1251  		bubble.changegstatus(gp, _Grunning, _Gdead)
  1252  		if raceenabled {
  1253  			// Establish a happens-before between this timer event and
  1254  			// the next synctest.Wait call.
  1255  			racereleasemergeg(gp, bubble.raceaddr())
  1256  		}
  1257  		gp.bubble = nil
  1258  	}
  1259  
  1260  	if ts != nil {
  1261  		ts.lock()
  1262  	}
  1263  
  1264  	if raceenabled {
  1265  		gp := getg()
  1266  		gp.racectx = 0
  1267  	}
  1268  }
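
// A standalone sketch of the stale-send squelch used above. The names
// (squelchTimer, fire, stop) are illustrative only; the point is the locking
// discipline: stop bumps seq while holding both locks, and the sender
// re-checks seq under sendLock so a send prepared for an old configuration
// becomes a no-op.
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//		"time"
//	)
//
//	type squelchTimer struct {
//		mu       sync.Mutex // like t.mu: guards seq together with sendLock
//		sendLock sync.Mutex // held across the send; also allows reading seq
//		seq      int
//		c        chan time.Time
//	}
//
//	// fire delivers a value prepared under configuration seq, unless a
//	// stop/reset has bumped the sequence number since it was prepared.
//	func (t *squelchTimer) fire(seq int) {
//		t.sendLock.Lock()
//		defer t.sendLock.Unlock()
//		if t.seq != seq { // sendLock held, so this read is safe
//			return // stale firing: squelch the send
//		}
//		t.c <- time.Now()
//	}
//
//	// stop invalidates any firing prepared under the old configuration.
//	func (t *squelchTimer) stop() {
//		t.sendLock.Lock()
//		t.mu.Lock()
//		t.seq++
//		t.mu.Unlock()
//		t.sendLock.Unlock()
//	}
//
//	func main() {
//		t := &squelchTimer{c: make(chan time.Time, 1)}
//		scheduled := t.seq // the firing was prepared under this configuration...
//		t.stop()           // ...but the timer was stopped before the send happened
//		t.fire(scheduled)
//		fmt.Println(len(t.c)) // 0: the stale send was squelched
//	}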
  1269  
  1270  // verify verifies that the timer heap is in a valid state.
  1271  // This is only for debugging, and is only called if verifyTimers is true.
  1272  // The caller must have locked ts.
  1273  func (ts *timers) verify() {
  1274  	assertLockHeld(&ts.mu)
  1275  	for i, tw := range ts.heap {
  1276  		if i == 0 {
  1277  			// First timer has no parent.
  1278  			continue
  1279  		}
  1280  
  1281  		// The heap is timerHeapN-ary. See siftupTimer and siftdownTimer.
  1282  		p := int(uint(i-1) / timerHeapN)
  1283  		if tw.less(ts.heap[p]) {
  1284  			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
  1285  			throw("bad timer heap")
  1286  		}
  1287  	}
  1288  	if n := int(ts.len.Load()); len(ts.heap) != n {
  1289  		println("timer heap len", len(ts.heap), "!= atomic len", n)
  1290  		throw("bad timer heap len")
  1291  	}
  1292  }
  1293  
  1294  // updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when.
  1295  // The caller must have locked ts or the world must be stopped.
  1296  func (ts *timers) updateMinWhenHeap() {
  1297  	assertWorldStoppedOrLockHeld(&ts.mu)
  1298  	if len(ts.heap) == 0 {
  1299  		ts.minWhenHeap.Store(0)
  1300  	} else {
  1301  		ts.minWhenHeap.Store(ts.heap[0].when)
  1302  	}
  1303  }
  1304  
  1305  // updateMinWhenModified updates ts.minWhenModified to be <= when.
  1306  // ts need not be (and usually is not) locked.
  1307  func (ts *timers) updateMinWhenModified(when int64) {
  1308  	for {
  1309  		old := ts.minWhenModified.Load()
  1310  		if old != 0 && old < when {
  1311  			return
  1312  		}
  1313  		if ts.minWhenModified.CompareAndSwap(old, when) {
  1314  			return
  1315  		}
  1316  	}
  1317  }
  1318  
  1319  // timeSleepUntil returns the time when the next timer should fire. Returns
  1320  // maxWhen if there are no timers.
  1321  // This is only called by sysmon and checkdead.
  1322  func timeSleepUntil() int64 {
  1323  	next := int64(maxWhen)
  1324  
  1325  	// Prevent allp slice changes. This is like retake.
  1326  	lock(&allpLock)
  1327  	for _, pp := range allp {
  1328  		if pp == nil {
  1329  			// This can happen if procresize has grown
  1330  			// allp but not yet created new Ps.
  1331  			continue
  1332  		}
  1333  
  1334  		if w := pp.timers.wakeTime(); w != 0 {
  1335  			next = min(next, w)
  1336  		}
  1337  	}
  1338  	unlock(&allpLock)
  1339  
  1340  	return next
  1341  }
  1342  
  1343  const timerHeapN = 4
  1344  
  1345  // Heap maintenance algorithms.
  1346  // These algorithms check for slice index errors manually.
  1347  // Slice index error can happen if the program is using racy
  1348  // access to timers. We don't want to panic here, because
  1349  // it will cause the program to crash with a mysterious
  1350  // "panic holding locks" message. Instead, we panic while not
  1351  // holding a lock.
  1352  
  1353  // siftUp puts the timer at position i in the right place
  1354  // in the heap by moving it up toward the top of the heap.
  1355  func (ts *timers) siftUp(i int) {
  1356  	heap := ts.heap
  1357  	if i >= len(heap) {
  1358  		badTimer()
  1359  	}
  1360  	tw := heap[i]
  1361  	if tw.when <= 0 {
  1362  		badTimer()
  1363  	}
  1364  	for i > 0 {
  1365  		p := int(uint(i-1) / timerHeapN) // parent
  1366  		if !tw.less(heap[p]) {
  1367  			break
  1368  		}
  1369  		heap[i] = heap[p]
  1370  		i = p
  1371  	}
  1372  	if heap[i].timer != tw.timer {
  1373  		heap[i] = tw
  1374  	}
  1375  }
  1376  
  1377  // siftDown puts the timer at position i in the right place
  1378  // in the heap by moving it down toward the bottom of the heap.
  1379  func (ts *timers) siftDown(i int) {
  1380  	heap := ts.heap
  1381  	n := len(heap)
  1382  	if i >= n {
  1383  		badTimer()
  1384  	}
  1385  	if i*timerHeapN+1 >= n {
  1386  		return
  1387  	}
  1388  	tw := heap[i]
  1389  	if tw.when <= 0 {
  1390  		badTimer()
  1391  	}
  1392  	for {
  1393  		leftChild := i*timerHeapN + 1
  1394  		if leftChild >= n {
  1395  			break
  1396  		}
  1397  		w := tw
  1398  		c := -1
  1399  		for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
  1400  			if tw.less(w) {
  1401  				w = tw
  1402  				c = leftChild + j
  1403  			}
  1404  		}
  1405  		if c < 0 {
  1406  			break
  1407  		}
  1408  		heap[i] = heap[c]
  1409  		i = c
  1410  	}
  1411  	if heap[i].timer != tw.timer {
  1412  		heap[i] = tw
  1413  	}
  1414  }
  1415  
  1416  // initHeap reestablishes the heap order in the slice ts.heap.
  1417  // It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
  1418  func (ts *timers) initHeap() {
  1419  	// Last possible element that needs sifting down is parent of last element;
  1420  	// last element is len(t)-1; parent of last element is (len(t)-1-1)/timerHeapN.
  1421  	if len(ts.heap) <= 1 {
  1422  		return
  1423  	}
  1424  	for i := int(uint(len(ts.heap)-1-1) / timerHeapN); i >= 0; i-- {
  1425  		ts.siftDown(i)
  1426  	}
  1427  }
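
// Worked example of the quaternary-heap index arithmetic used by siftUp,
// siftDown, and initHeap (timerHeapN = 4): for index i, the children are
// i*4+1 through i*4+4 and the parent is (i-1)/4.
//
//	For a 10-element heap (indices 0..9):
//
//		children(0) = 1, 2, 3, 4
//		children(1) = 5, 6, 7, 8
//		children(2) = 9            (3, 4, and 5..9 are leaves)
//		parent(9)   = (9-1)/4 = 2
//		parent(4)   = (4-1)/4 = 0
//
//	initHeap therefore starts sifting down at (10-1-1)/4 = 2, the parent of
//	the last element, and works toward the root.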
  1428  
  1429  // badTimer is called if the timer data structures have been corrupted,
  1430  // presumably due to racy use by the program. We panic here rather than
  1431  // panicking due to invalid slice access while holding locks.
  1432  // See issue #25686.
  1433  func badTimer() {
  1434  	throw("timer data corruption")
  1435  }
  1436  
  1437  // Timer channels.
  1438  
  1439  // maybeRunChan checks whether the timer needs to run
  1440  // to send a value to its associated channel. If so, it does.
  1441  // The timer must not be locked.
  1442  func (t *timer) maybeRunChan(c *hchan) {
  1443  	if t.isFake && getg().bubble != c.bubble {
  1444  		// This should have been checked by the caller, but check just in case.
  1445  		fatal("synctest timer accessed from outside bubble")
  1446  	}
  1447  	if t.astate.Load()&timerHeaped != 0 {
  1448  		// If the timer is in the heap, the ordinary timer code
  1449  		// is in charge of sending when appropriate.
  1450  		return
  1451  	}
  1452  
  1453  	t.lock()
  1454  	now := nanotime()
  1455  	if t.isFake {
  1456  		now = getg().bubble.now
  1457  	}
  1458  	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
  1459  		t.trace("maybeRunChan-")
  1460  		// Timer in the heap, or not running at all, or not triggered.
  1461  		t.unlock()
  1462  		return
  1463  	}
  1464  	t.trace("maybeRunChan+")
  1465  	systemstack(func() {
  1466  		t.unlockAndRun(now, c.bubble)
  1467  	})
  1468  }
  1469  
  1470  // blockTimerChan is called when a channel op has decided to block on c.
  1471  // The caller holds the channel lock for c and possibly other channels.
  1472  // blockTimerChan makes sure that c's timer is in a timer heap,
  1473  // adding it if needed.
  1474  func blockTimerChan(c *hchan) {
  1475  	t := c.timer
  1476  	if t.isFake && c.bubble != getg().bubble {
  1477  		// This should have been checked by the caller, but check just in case.
  1478  		fatal("synctest timer accessed from outside bubble")
  1479  	}
  1480  
  1481  	t.lock()
  1482  	t.trace("blockTimerChan")
  1483  	if !t.isChan {
  1484  		badTimer()
  1485  	}
  1486  
  1487  	t.blocked++
  1488  
  1489  	// If this is the first enqueue after a recent dequeue,
  1490  	// the timer may still be in the heap but marked as a zombie.
  1491  	// Unmark it in this case, if the timer is still pending.
  1492  	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
  1493  		t.state &^= timerZombie
  1494  		t.ts.zombies.Add(-1)
  1495  	}
  1496  
  1497  	// t.maybeAdd must be called with t unlocked,
  1498  	// because it needs to lock t.ts before t.
  1499  	// Then it will do nothing if t.needsAdd() is false.
  1500  	// Check that now before the unlock,
  1501  	// avoiding the extra lock-lock-unlock-unlock
  1502  	// inside maybeAdd when t does not need to be added.
  1503  	add := t.needsAdd()
  1504  	t.unlock()
  1505  	if add {
  1506  		t.maybeAdd()
  1507  	}
  1508  }
  1509  
  1510  // unblockTimerChan is called when a channel op that was blocked on c
  1511  // is no longer blocked. Every call to blockTimerChan must be paired with
  1512  // a call to unblockTimerChan.
  1513  // The caller holds the channel lock for c and possibly other channels.
  1514  // unblockTimerChan marks c's timer for removal from the timer heap
  1515  // when nothing is blocked on it anymore.
  1516  func unblockTimerChan(c *hchan) {
  1517  	t := c.timer
  1518  	t.lock()
  1519  	t.trace("unblockTimerChan")
  1520  	if !t.isChan || t.blocked == 0 {
  1521  		badTimer()
  1522  	}
  1523  	t.blocked--
  1524  	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
  1525  		// Last goroutine that was blocked on this timer.
  1526  		// Mark for removal from heap but do not clear t.when,
  1527  		// so that we know what time it is still meant to trigger.
  1528  		t.state |= timerZombie
  1529  		t.ts.zombies.Add(1)
  1530  	}
  1531  	t.unlock()
  1532  }
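
// For context, a user-level sketch of the lazy scheduling these two functions
// enable: a channel timer stays out of the heap until someone blocks on its
// channel, and an overdue timer is run on access (maybeRunChan) with the
// delivered value backdated by the delay, as described in the timer struct
// comment. Timings here are illustrative, not guaranteed:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		start := time.Now()
//		t := time.NewTimer(10 * time.Millisecond)
//
//		// No goroutine blocks on t.C, so the runtime may keep this channel
//		// timer out of any P's heap (needsAdd requires blocked > 0).
//		time.Sleep(50 * time.Millisecond)
//
//		// The receive succeeds immediately: the overdue timer is run lazily,
//		// and the value it delivers is backdated by the accumulated delay.
//		stamp := <-t.C
//		fmt.Println(stamp.Sub(start).Round(time.Millisecond)) // ~10ms, not ~50ms
//	}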
  1533  
