Source file src/runtime/time.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package runtime

import (
	"internal/abi"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
// Timers are allocated by client code, often as part of other data structures.
// Each P has a heap of pointers to timers that it manages.
//
// A timer is expected to be used by only one client goroutine at a time,
// but there will be concurrent access by the P managing that timer.
// Timer accesses are protected by the lock t.mu, with a snapshot of
// t's state bits published in t.astate to enable certain fast paths to make
// decisions about a timer without acquiring the lock.
type timer struct {
	// mu protects reads and writes to all fields, with exceptions noted below.
	mu mutex

	astate  atomic.Uint8 // atomic copy of state bits at last unlock
	state   uint8        // state bits
	isChan  bool         // timer has a channel; immutable; can be read without lock
	blocked uint32       // number of goroutines blocked on timer's channel

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// The arg and seq are client-specified opaque arguments passed back to f.
	// When used from netpoll, arg and seq have meanings defined by netpoll
	// and are completely opaque to this code; in that context, seq is a sequence
	// number to recognize and squelch stale function invocations.
	// When used from package time, arg is a channel (for After, NewTicker)
	// or the function to call (for AfterFunc) and seq is unused (0).
	//
	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
	// this file uses t.seq as a sequence number to recognize and squelch
	// sends that correspond to an earlier (stale) timer configuration,
	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
	// writes to seq are protected by both t.mu and t.sendLock,
	// so reads are allowed when holding either of the two mutexes.
	//
	// The delay argument is nanotime() - t.when, meaning the delay in ns between
	// when the timer should have gone off and now. Normally that amount is
	// small enough not to matter, but for channel timers that are fed lazily,
	// the delay can be arbitrarily long; package time subtracts it out to make
	// it look like the send happened earlier than it actually did.
	// (No one looked at the channel since then, or the send would have
	// not happened so late, so no one can tell the difference.)
	when   int64
	period int64
	f      func(arg any, seq uintptr, delay int64)
	arg    any
	seq    uintptr

	// If non-nil, the timers containing t.
	ts *timers

	// sendLock protects sends on the timer's channel.
	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
	sendLock mutex
}

// init initializes a newly allocated timer t.
// Any code that allocates a timer must call t.init before using it.
// The arg and f can be set during init, or they can be nil in init
// and set by a future call to t.modify.
func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
	lockInit(&t.mu, lockRankTimer)
	t.f = f
	t.arg = arg
}

// A timers is a per-P set of timers.
type timers struct {
	// mu protects timers; timers are per-P, but the scheduler can
	// access the timers of another P, so we have to lock.
	mu mutex

	// heap is the set of timers, ordered by heap[i].when.
	// Must hold lock to access.
	heap []timerWhen

	// len is an atomic copy of len(heap).
	len atomic.Uint32

	// zombies is the number of timers in the heap
	// that are marked for removal.
	zombies atomic.Int32

	// raceCtx is the race context used while executing timer functions.
	raceCtx uintptr

	// minWhenHeap is the minimum heap[i].when value (= heap[0].when).
	// The wakeTime method uses minWhenHeap and minWhenModified
	// to determine the next wake time.
	// If minWhenHeap = 0, it means there are no timers in the heap.
	minWhenHeap atomic.Int64

	// minWhenModified is a lower bound on the minimum
	// heap[i].when over timers with the timerModified bit set.
	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
	minWhenModified atomic.Int64
}

// A timerWhen is a heap entry: a timer together with a cached copy of its
// when field. The cached when is what orders the heap (see timers.heap),
// so heap ordering can be checked without locking each timer.
type timerWhen struct {
	timer *timer
	when  int64
}

// lock acquires the timers lock ts.mu.
func (ts *timers) lock() {
	lock(&ts.mu)
}

// unlock refreshes the atomic length copy ts.len and releases ts.mu.
func (ts *timers) unlock() {
	// Update atomic copy of len(ts.heap).
	// We only update at unlock so that the len is always
	// the most recent unlocked length, not an ephemeral length.
	// This matters if we lock ts, delete the only timer from the heap,
	// add it back, and unlock. We want ts.len.Load to return 1 the
	// entire time, never 0. This is important for pidleput deciding
	// whether ts is empty.
	ts.len.Store(uint32(len(ts.heap)))

	unlock(&ts.mu)
}

// Timer state field.
const (
	// timerHeaped is set when the timer is stored in some P's heap.
	timerHeaped uint8 = 1 << iota

	// timerModified is set when t.when has been modified
	// but the heap's heap[i].when entry still needs to be updated.
	// That change waits until the heap in which
	// the timer appears can be locked and rearranged.
	// timerModified is only set when timerHeaped is also set.
	timerModified

	// timerZombie is set when the timer has been stopped
	// but is still present in some P's heap.
	// Only set when timerHeaped is also set.
	// It is possible for timerModified and timerZombie to both
	// be set, meaning that the timer was modified and then stopped.
	// A timer sending to a channel may be placed in timerZombie
	// to take it out of the heap even though the timer is not stopped,
	// as long as nothing is reading from the channel.
	timerZombie
)

// timerDebug enables printing a textual debug trace of all timer operations to stderr.
const timerDebug = false

// trace prints a debug record for operation op on t when timerDebug is enabled.
func (t *timer) trace(op string) {
	if timerDebug {
		t.trace1(op)
	}
}

// trace1 prints one debug line for t: the state bits h (timerHeaped),
// m (timerModified), z (timerZombie), and c (isChan), each replaced by "-"
// when clear, followed by the blocked count and op.
func (t *timer) trace1(op string) {
	if !timerDebug {
		return
	}
	bits := [4]string{"h", "m", "z", "c"}
	for i := range 3 {
		if t.state&(1<<i) == 0 {
			bits[i] = "-"
		}
	}
	if !t.isChan {
		bits[3] = "-"
	}
	print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n")
}

// trace prints a debug record for operation op on ts when timerDebug is enabled.
func (ts *timers) trace(op string) {
	if timerDebug {
		println("TS", ts, op)
	}
}

// lock locks the timer, allowing reading or writing any of the timer fields.
func (t *timer) lock() {
	lock(&t.mu)
	t.trace("lock")
}

// unlock updates t.astate and unlocks the timer.
func (t *timer) unlock() {
	t.trace("unlock")
	// Let heap fast paths know whether heap[i].when is accurate.
	// Also let maybeRunChan know whether channel is in heap.
	t.astate.Store(t.state)
	unlock(&t.mu)
}

// hchan returns the channel in t.arg.
// t must be a timer with a channel.
func (t *timer) hchan() *hchan {
	if !t.isChan {
		badTimer()
	}
	// Note: t.arg is a chan time.Time,
	// and runtime cannot refer to that type,
	// so we cannot use a type assertion.
	return (*hchan)(efaceOf(&t.arg).data)
}

// updateHeap updates t as directed by t.state, updating t.state
// and returning a bool indicating whether the state (and ts.heap[0].when) changed.
// The caller must hold t's lock, or the world can be stopped instead.
// The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
// takes care of moving t within the timers heap to preserve the heap invariants.
// If ts == nil, then t must not be in a heap (or is in a heap that is
// temporarily not maintaining its invariant, such as during timers.adjust).
func (t *timer) updateHeap() (updated bool) {
	assertWorldStoppedOrLockHeld(&t.mu)
	t.trace("updateHeap")
	ts := t.ts
	if ts == nil || t != ts.heap[0].timer {
		badTimer()
	}
	assertLockHeld(&ts.mu)
	if t.state&timerZombie != 0 {
		// Take timer out of heap.
		t.state &^= timerHeaped | timerZombie | timerModified
		ts.zombies.Add(-1)
		ts.deleteMin()
		return true
	}

	if t.state&timerModified != 0 {
		// Update ts.heap[0].when and move within heap.
		t.state &^= timerModified
		ts.heap[0].when = t.when
		ts.siftDown(0)
		ts.updateMinWhenHeap()
		return true
	}

	return false
}

// maxWhen is the maximum value for timer's when field.
const maxWhen = 1<<63 - 1

// verifyTimers can be set to true to add debugging checks that the
// timer heaps are valid.
const verifyTimers = false

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.

// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}

	gp := getg()
	t := gp.timer
	if t == nil {
		// Lazily allocate a per-goroutine timer, reused across sleeps.
		t = new(timer)
		t.init(goroutineReady, gp)
		gp.timer = t
	}
	when := nanotime() + ns
	if when < 0 { // check for overflow.
		when = maxWhen
	}
	gp.sleepWhen = when
	gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
}

// resetForSleep is called after the goroutine is parked for timeSleep.
// We can't call timer.reset in timeSleep itself because if this is a short
// sleep and there are many goroutines then the P can wind up running the
// timer function, goroutineReady, before the goroutine has been parked.
func resetForSleep(gp *g, _ unsafe.Pointer) bool {
	gp.timer.reset(gp.sleepWhen, 0)
	return true
}

// A timeTimer is a runtime-allocated time.Timer or time.Ticker
// with the additional runtime state following it.
// The runtime state is inaccessible to package time.
type timeTimer struct {
	c    unsafe.Pointer // <-chan time.Time
	init bool
	timer
}

// newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
// with the given parameters.
//
//go:linkname newTimer time.newTimer
func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
	t := new(timeTimer)
	t.timer.init(nil, nil)
	t.trace("new")
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	if c != nil {
		lockInit(&t.sendLock, lockRankTimerSend)
		t.isChan = true
		c.timer = &t.timer
		if c.dataqsiz == 0 {
			throw("invalid timer channel: no capacity")
		}
	}
	t.modify(when, period, f, arg, 0)
	t.init = true
	return t
}

// stopTimer stops a timer.
// It reports whether t was stopped before being run.
//
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool {
	return t.stop()
}

// resetTimer resets an inactive timer, adding it to the timer heap.
//
// Reports whether the timer was modified before it was run.
//
//go:linkname resetTimer time.resetTimer
func resetTimer(t *timeTimer, when, period int64) bool {
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	return t.reset(when, period)
}

// Go runtime.

// Ready the goroutine arg.
func goroutineReady(arg any, _ uintptr, _ int64) {
	goready(arg.(*g), 0)
}

// addHeap adds t to the timers heap.
// The caller must hold ts.lock or the world must be stopped.
// The caller must also have checked that t belongs in the heap.
// Callers that are not sure can call t.maybeAdd instead,
// but note that maybeAdd has different locking requirements.
func (ts *timers) addHeap(t *timer) {
	assertWorldStoppedOrLockHeld(&ts.mu)
	// Timers rely on the network poller, so make sure the poller
	// has started.
	if netpollInited.Load() == 0 {
		netpollGenericInit()
	}

	if t.ts != nil {
		throw("ts set in timer")
	}
	t.ts = ts
	ts.heap = append(ts.heap, timerWhen{t, t.when})
	ts.siftUp(len(ts.heap) - 1)
	if t == ts.heap[0].timer {
		ts.updateMinWhenHeap()
	}
}

// maybeRunAsync checks whether t needs to be triggered and runs it if so.
// The caller is responsible for locking the timer and for checking that we
// are running timers in async mode. If the timer needs to be run,
// maybeRunAsync will unlock and re-lock it.
// The timer is always locked on return.
func (t *timer) maybeRunAsync() {
	assertLockHeld(&t.mu)
	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
		// If timer should have triggered already (but nothing looked at it yet),
		// trigger now, so that a receive after the stop sees the "old" value
		// that should be there.
		// (It is possible to have t.blocked > 0 if there is a racing receive
		// in blockTimerChan, but timerHeaped not being set means
		// it hasn't run t.maybeAdd yet; in that case, running the
		// timer ourselves now is fine.)
		if now := nanotime(); t.when <= now {
			systemstack(func() {
				t.unlockAndRun(now) // resets t.when
			})
			t.lock()
		}
	}
}

// stop stops the timer t. It may be on some other P, so we can't
// actually remove it from the timers heap. We can only mark it as stopped.
// It will be removed in due course by the P whose heap it is on.
// Reports whether the timer was stopped before it was run.
func (t *timer) stop() bool {
	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	t.trace("stop")
	if async {
		t.maybeRunAsync()
	}
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
	}
	pending := t.when > 0
	t.when = 0

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
	}
	t.unlock()
	if !async && t.isChan {
		unlock(&t.sendLock)
		if timerchandrain(t.hchan()) {
			pending = true
		}
	}

	return pending
}

// deleteMin removes timer 0 from ts.
// ts must be locked.
func (ts *timers) deleteMin() {
	assertLockHeld(&ts.mu)
	t := ts.heap[0].timer
	if t.ts != ts {
		throw("wrong timers")
	}
	t.ts = nil
	last := len(ts.heap) - 1
	if last > 0 {
		ts.heap[0] = ts.heap[last]
	}
	// Clear the vacated slot so the GC does not retain the timer.
	ts.heap[last] = timerWhen{}
	ts.heap = ts.heap[:last]
	if last > 0 {
		ts.siftDown(0)
	}
	ts.updateMinWhenHeap()
	if last == 0 {
		// If there are no timers, then clearly there are no timerModified timers.
		ts.minWhenModified.Store(0)
	}
}

// modify modifies an existing timer.
// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
// Reports whether the timer was modified before it was run.
// If f == nil, then t.f, t.arg, and t.seq are not modified.
func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
	if when <= 0 {
		throw("timer when must be positive")
	}
	if period < 0 {
		throw("timer period must be non-negative")
	}
	async := debug.asynctimerchan.Load() != 0

	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	if async {
		t.maybeRunAsync()
	}
	t.trace("modify")
	t.period = period
	if f != nil {
		t.f = f
		t.arg = arg
		t.seq = seq
	}

	wake := false
	pending := t.when > 0
	t.when = when
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie != 0 {
			// In the heap but marked for removal (by a Stop).
			// Unmark it, since it has been Reset and will be running again.
			t.ts.zombies.Add(-1)
			t.state &^= timerZombie
		}
		// The corresponding heap[i].when is updated later.
		// See comment in type timer above and in timers.adjust below.
		if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
			wake = true
			// Force timerModified bit out to t.astate before updating t.minWhenModified,
			// to synchronize with t.ts.adjust. See comment in adjust.
			t.astate.Store(t.state)
			t.ts.updateMinWhenModified(when)
		}
	}

	add := t.needsAdd()

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
	}
	t.unlock()
	if !async && t.isChan {
		if timerchandrain(t.hchan()) {
			pending = true
		}
		unlock(&t.sendLock)
	}

	if add {
		t.maybeAdd()
	}
	if wake {
		wakeNetPoller(when)
	}

	return pending
}

// needsAdd reports whether t needs to be added to a timers heap.
// t must be locked.
func (t *timer) needsAdd() bool {
	assertLockHeld(&t.mu)
	need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0)
	if need {
		t.trace("needsAdd+")
	} else {
		t.trace("needsAdd-")
	}
	return need
}

// maybeAdd adds t to the local timers heap if it needs to be in a heap.
// The caller must not hold t's lock nor any timers heap lock.
// The caller probably just unlocked t, but that lock must be dropped
// in order to acquire a ts.lock, to avoid lock inversions.
// (timers.adjust holds ts.lock while acquiring each t's lock,
// so we cannot hold any t's lock while acquiring ts.lock).
//
// Strictly speaking it *might* be okay to hold t.lock and
// acquire ts.lock at the same time, because we know that
// t is not in any ts.heap, so nothing holding a ts.lock would
// be acquiring the t.lock at the same time, meaning there
// isn't a possible deadlock. But it is easier and safer not to be
// too clever and respect the static ordering.
// (If we don't, we have to change the static lock checking of t and ts.)
//
// Concurrent calls to time.Timer.Reset or blockTimerChan
// may result in concurrent calls to t.maybeAdd,
// so we cannot assume that t is not in a heap on entry to t.maybeAdd.
func (t *timer) maybeAdd() {
	// Note: Not holding any locks on entry to t.maybeAdd,
	// so the current g can be rescheduled to a different M and P
	// at any time, including between the ts := assignment and the
	// call to ts.lock. If a reschedule happened then, we would be
	// adding t to some other P's timers, perhaps even a P that the scheduler
	// has marked as idle with no timers, in which case the timer could
	// go unnoticed until long after t.when.
	// Calling acquirem instead of using getg().m makes sure that
	// we end up locking and inserting into the current P's timers.
	mp := acquirem()
	ts := &mp.p.ptr().timers
	ts.lock()
	ts.cleanHead()
	t.lock()
	t.trace("maybeAdd")
	when := int64(0)
	wake := false
	if t.needsAdd() {
		t.state |= timerHeaped
		when = t.when
		wakeTime := ts.wakeTime()
		wake = wakeTime == 0 || when < wakeTime
		ts.addHeap(t)
	}
	t.unlock()
	ts.unlock()
	releasem(mp)
	if wake {
		wakeNetPoller(when)
	}
}

// reset resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// Reports whether the timer was active and was stopped.
func (t *timer) reset(when, period int64) bool {
	return t.modify(when, period, nil, nil, 0)
}

// cleanHead cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down heap operations.
// The caller must have locked ts.
func (ts *timers) cleanHead() {
	ts.trace("cleanHead")
	assertLockHeld(&ts.mu)
	gp := getg()
	for {
		if len(ts.heap) == 0 {
			return
		}

		// This loop can theoretically run for a while, and because
		// it is holding timersLock it cannot be preempted.
		// If someone is trying to preempt us, just return.
		// We can clean the timers later.
		if gp.preemptStop {
			return
		}

		// Delete zombies from tail of heap. It requires no heap adjustments at all,
		// and doing so increases the chances that when we swap out a zombie
		// in heap[0] for the tail of the heap, we'll get a non-zombie timer,
		// shortening this loop.
		n := len(ts.heap)
		if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 {
			t.lock()
			// Re-check under t's lock: the astate snapshot may be stale.
			if t.state&timerZombie != 0 {
				t.state &^= timerHeaped | timerZombie | timerModified
				t.ts = nil
				ts.zombies.Add(-1)
				ts.heap[n-1] = timerWhen{}
				ts.heap = ts.heap[:n-1]
			}
			t.unlock()
			continue
		}

		t := ts.heap[0].timer
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Fast path: head of timers does not need adjustment.
			return
		}

		t.lock()
		updated := t.updateHeap()
		t.unlock()
		if !updated {
			// Head of timers does not need adjustment.
			return
		}
	}
}

// take moves any timers from src into ts
// and then clears the timer state from src,
// because src is being destroyed.
// The caller must not have locked either timers.
// For now this is only called when the world is stopped.
func (ts *timers) take(src *timers) {
	ts.trace("take")
	assertWorldStopped()
	if len(src.heap) > 0 {
		// The world is stopped, so we ignore the locking of ts and src here.
		// That would introduce a sched < timers lock ordering,
		// which we'd rather avoid in the static ranking.
		for _, tw := range src.heap {
			t := tw.timer
			t.ts = nil
			if t.state&timerZombie != 0 {
				// Stopped timer: drop it instead of migrating it.
				t.state &^= timerHeaped | timerZombie | timerModified
			} else {
				t.state &^= timerModified
				ts.addHeap(t)
			}
		}
		src.heap = nil
		src.zombies.Store(0)
		src.minWhenHeap.Store(0)
		src.minWhenModified.Store(0)
		src.len.Store(0)
		ts.len.Store(uint32(len(ts.heap)))
	}
}

// adjust looks through the timers in ts.heap for
// any timers that have been modified to run earlier, and puts them in
// the correct place in the heap. While looking for those timers,
// it also moves timers that have been modified to run later,
// and removes deleted timers. The caller must have locked ts.
func (ts *timers) adjust(now int64, force bool) {
	ts.trace("adjust")
	assertLockHeld(&ts.mu)
	// If we haven't yet reached the time of the earliest modified
	// timer, don't do anything. This speeds up programs that adjust
	// a lot of timers back and forth if the timers rarely expire.
	// We'll postpone looking through all the adjusted timers until
	// one would actually expire.
	if !force {
		first := ts.minWhenModified.Load()
		if first == 0 || first > now {
			if verifyTimers {
				ts.verify()
			}
			return
		}
	}

	// minWhenModified is a lower bound on the earliest t.when
	// among the timerModified timers. We want to make it more precise:
	// we are going to scan the heap and clean out all the timerModified bits,
	// at which point minWhenModified can be set to 0 (indicating none at all).
	//
	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
	// keep ts.wakeTime returning an accurate value throughout this entire process.
	//
	// Setting minWhenModified = 0 *before* the scan could make wakeTime
	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
	//
	// Setting minWhenModified = 0 *after* the scan could result in missing
	// concurrent timer modifications in other goroutines; those will lock
	// the specific timer, set the timerModified bit, and set t.when.
	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
	//
	// The way out of this dilemma is to preserve wakeTime a different way.
	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
	// is protected by ts.lock, which we hold, so we can modify it however we like
	// in service of keeping wakeTime accurate.
	//
	// So we can:
	//
	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
	//	2. Set minWhenModified = 0
	//	   (Other goroutines may modify timers and update minWhenModified now.)
	//	3. Scan timers
	//	4. Set minWhenHeap = heap[0].when
	//
	// That order preserves a correct value of wakeTime throughout the entire
	// operation:
	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
	// Step 4 corrects minWhenHeap to a precise value.
	//
	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
	// follows will include the information that was zeroed out of it.
	//
	// Originally Step 3 locked every timer, which made sure any timer update that was
	// already in progress during Steps 1+2 completed and was observed by Step 3.
	// All that locking was too expensive, so now we do an atomic load of t.astate to
	// decide whether we need to do a full lock. To make sure that we still observe any
	// timer update already in progress during Steps 1+2, t.modify sets timerModified
	// in t.astate *before* calling t.updateMinWhenModified. That ensures that the
	// overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3
	// will see the timerModified and do a full lock.
	ts.minWhenHeap.Store(ts.wakeTime())
	ts.minWhenModified.Store(0)

	changed := false
	for i := 0; i < len(ts.heap); i++ {
		tw := &ts.heap[i]
		t := tw.timer
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Does not need adjustment.
			continue
		}

		t.lock()
		switch {
		case t.state&timerHeaped == 0:
			badTimer()

		case t.state&timerZombie != 0:
			ts.zombies.Add(-1)
			t.state &^= timerHeaped | timerZombie | timerModified
			n := len(ts.heap)
			ts.heap[i] = ts.heap[n-1]
			ts.heap[n-1] = timerWhen{}
			ts.heap = ts.heap[:n-1]
			t.ts = nil
			// Re-examine slot i, now holding the former tail entry.
			i--
			changed = true

		case t.state&timerModified != 0:
			tw.when = t.when
			t.state &^= timerModified
			changed = true
		}
		t.unlock()
	}

	if changed {
		ts.initHeap()
	}
	ts.updateMinWhenHeap()

	if verifyTimers {
		ts.verify()
	}
}

// wakeTime looks at ts's timers and returns the time when we
// should wake up the netpoller. It returns 0 if there are no timers.
// This function is invoked when dropping a P, so it must run without
// any write barriers.
//
//go:nowritebarrierrec
func (ts *timers) wakeTime() int64 {
	// Note that the order of these two loads matters:
	// adjust updates minWhen to make it safe to clear minNextWhen.
	// We read minWhen after reading minNextWhen so that
	// if we see a cleared minNextWhen, we are guaranteed to see
	// the updated minWhen.
	nextWhen := ts.minWhenModified.Load()
	when := ts.minWhenHeap.Load()
	if when == 0 || (nextWhen != 0 && nextWhen < when) {
		when = nextWhen
	}
	return when
}

// check runs any timers in ts that are ready.
// If now is not 0 it is the current time.
// It returns the passed time or the current time if now was passed as 0,
// and the time when the next timer should run or 0 if there is no next timer,
// and reports whether it ran any timers.
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
//
//go:yeswritebarrierrec
func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
	ts.trace("check")
	// If it's not yet time for the first timer, or the first adjusted
	// timer, then there is nothing to do.
	next := ts.wakeTime()
	if next == 0 {
		// No timers to run or adjust.
		return now, 0, false
	}

	if now == 0 {
		now = nanotime()
	}

	// If this is the local P, and there are a lot of deleted timers,
	// clear them out. We only do this for the local P to reduce
	// lock contention on timersLock.
	zombies := ts.zombies.Load()
	if zombies < 0 {
		badTimer()
	}
	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4

	if now < next && !force {
		// Next timer is not ready to run, and we don't need to clear deleted timers.
		return now, next, false
	}

	ts.lock()
	if len(ts.heap) > 0 {
		ts.adjust(now, false)
		for len(ts.heap) > 0 {
			// Note that runtimer may temporarily unlock ts.
			if tw := ts.run(now); tw != 0 {
				if tw > 0 {
					pollUntil = tw
				}
				break
			}
			ran = true
		}

		// Note: Delaying the forced adjustment until after the ts.run
		// (as opposed to calling ts.adjust(now, force) above)
		// is significantly faster under contention, such as in
		// package time's BenchmarkTimerAdjust10000,
		// though we do not fully understand why.
		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
		if force {
			ts.adjust(now, true)
		}
	}
	ts.unlock()

	return now, pollUntil, ran
}

// run examines the first timer in ts. If it is ready based on now,
// it runs the timer and removes or updates it.
// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
// when the first timer should run.
// The caller must have locked ts.
// If a timer is run, this will temporarily unlock ts.
//
//go:systemstack
func (ts *timers) run(now int64) int64 {
	ts.trace("run")
	assertLockHeld(&ts.mu)
Redo:
	if len(ts.heap) == 0 {
		return -1
	}
	tw := ts.heap[0]
	t := tw.timer
	if t.ts != ts {
		throw("bad ts")
	}

	if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now {
		// Fast path: not ready to run.
		return tw.when
	}

	t.lock()
	if t.updateHeap() {
		// Heap head changed (timer removed or re-sorted); start over.
		t.unlock()
		goto Redo
	}

	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
		badTimer()
	}

	if t.when > now {
		// Not ready to run.
		t.unlock()
		return t.when
	}

	t.unlockAndRun(now)
	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
	return 0
}

// unlockAndRun unlocks and runs the timer t (which must be locked).
// If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
// and this call will temporarily unlock the timer set while running the timer function.
// unlockAndRun returns with t unlocked and t.ts (re-)locked.
//
//go:systemstack
func (t *timer) unlockAndRun(now int64) {
	t.trace("unlockAndRun")
	assertLockHeld(&t.mu)
	if t.ts != nil {
		assertLockHeld(&t.ts.mu)
	}
	if raceenabled {
		// Note that we are running on a system stack,
		// so there is no chance of getg().m being reassigned
		// out from under us while this function executes.
		tsLocal := &getg().m.p.ptr().timers
		if tsLocal.raceCtx == 0 {
			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
		}
		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
	}

	if t.state&(timerModified|timerZombie) != 0 {
		badTimer()
	}

	// Snapshot the callback parameters while holding t.mu.
	f := t.f
	arg := t.arg
	seq := t.seq
	var next int64
	delay := now - t.when
	if t.period > 0 {
		// Leave in heap but adjust next time to fire.
		next = t.when + t.period*(1+delay/t.period)
		if next < 0 { // check for overflow.
			next = maxWhen
		}
	} else {
		next = 0
	}
	ts := t.ts
	t.when = next
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if next == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
		t.updateHeap()
	}
	t.unlock()

	if raceenabled {
		// Temporarily use the current P's racectx for g0.
		gp := getg()
		if gp.racectx != 0 {
			throw("unexpected racectx")
		}
		gp.racectx = gp.m.p.ptr().timers.raceCtx
	}

	if ts != nil {
		ts.unlock()
	}

	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		// For a timer channel, we want to make sure that no stale sends
		// happen after a t.stop or t.modify, but we cannot hold t.mu
		// during the actual send (which f does) due to lock ordering.
		// It can happen that we are holding t's lock above, we decide
		// it's time to send a time value (by calling f), grab the parameters,
		// unlock above, and then a t.stop or t.modify changes the timer
		// and returns. At that point, the send needs not to happen after all.
		// The way we arrange for it not to happen is that t.stop and t.modify
		// both increment t.seq while holding both t.mu and t.sendLock.
		// We copied the seq value above while holding t.mu.
		// Now we can acquire t.sendLock (which will be held across the send)
		// and double-check that t.seq is still the seq value we saw above.
		// If not, the timer has been updated and we should skip the send.
		// We skip the send by reassigning f to a no-op function.
		lock(&t.sendLock)
		if t.seq != seq {
			f = func(any, uintptr, int64) {}
		}
	}

	f(arg, seq, delay)

	if !async && t.isChan {
		unlock(&t.sendLock)
	}

	if ts != nil {
		ts.lock()
	}

	if raceenabled {
		gp := getg()
		gp.racectx = 0
	}
}

// verifyTimerHeap verifies that the timers is in a valid state.
// This is only for debugging, and is only called if verifyTimers is true.
// The caller must have locked ts.
func (ts *timers) verify() {
	assertLockHeld(&ts.mu)
	for i, tw := range ts.heap {
		if i == 0 {
			// First timer has no parent.
			continue
		}

		// The heap is timerHeapN-ary. See siftupTimer and siftdownTimer.
		p := int(uint(i-1) / timerHeapN)
		if tw.when < ts.heap[p].when {
			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
			throw("bad timer heap")
		}
	}
	if n := int(ts.len.Load()); len(ts.heap) != n {
		println("timer heap len", len(ts.heap), "!= atomic len", n)
		throw("bad timer heap len")
	}
}

// updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when.
// The caller must have locked ts or the world must be stopped.
func (ts *timers) updateMinWhenHeap() {
	assertWorldStoppedOrLockHeld(&ts.mu)
	if len(ts.heap) == 0 {
		ts.minWhenHeap.Store(0)
	} else {
		ts.minWhenHeap.Store(ts.heap[0].when)
	}
}

// updateMinWhenModified updates ts.minWhenModified to be <= when.
// ts need not be (and usually is not) locked.
func (ts *timers) updateMinWhenModified(when int64) {
	// CAS loop: lower the value only; racing lowerings all converge.
	for {
		old := ts.minWhenModified.Load()
		if old != 0 && old < when {
			return
		}
		if ts.minWhenModified.CompareAndSwap(old, when) {
			return
		}
	}
}

// timeSleepUntil returns the time when the next timer should fire. Returns
// maxWhen if there are no timers.
// This is only called by sysmon and checkdead.
func timeSleepUntil() int64 {
	next := int64(maxWhen)

	// Prevent allp slice changes. This is like retake.
	lock(&allpLock)
	for _, pp := range allp {
		if pp == nil {
			// This can happen if procresize has grown
			// allp but not yet created new Ps.
			continue
		}

		if w := pp.timers.wakeTime(); w != 0 {
			next = min(next, w)
		}
	}
	unlock(&allpLock)

	return next
}

// timerHeapN is the fanout of the timers heap: each node has up to
// timerHeapN children (see the parent computation in timers.verify).
const timerHeapN = 4

// Heap maintenance algorithms.
// These algorithms check for slice index errors manually.
// Slice index error can happen if the program is using racy
// access to timers. We don't want to panic here, because
// it will cause the program to crash with a mysterious
// "panic holding locks" message. Instead, we panic while not
// holding a lock.

// siftUp puts the timer at position i in the right place
// in the heap by moving it up toward the top of the heap.
func (ts *timers) siftUp(i int) {
	heap := ts.heap
	if i >= len(heap) {
		badTimer()
	}
	tw := heap[i]
	when := tw.when
	if when <= 0 {
		badTimer()
	}
	// Shift parents down until we find tw's slot, then store tw once,
	// rather than swapping at every level.
	for i > 0 {
		p := int(uint(i-1) / timerHeapN) // parent
		if when >= heap[p].when {
			break
		}
		heap[i] = heap[p]
		i = p
	}
	// Only write tw back if it actually moved.
	if heap[i].timer != tw.timer {
		heap[i] = tw
	}
}

// siftDown puts the timer at position i in the right place
// in the heap by moving it down toward the bottom of the heap.
func (ts *timers) siftDown(i int) {
	heap := ts.heap
	n := len(heap)
	if i >= n {
		badTimer()
	}
	if i*timerHeapN+1 >= n {
		// No children: nothing to do.
		return
	}
	tw := heap[i]
	when := tw.when
	if when <= 0 {
		badTimer()
	}
	for {
		leftChild := i*timerHeapN + 1
		if leftChild >= n {
			break
		}
		// Find the earliest child that is earlier than tw (c < 0 if none).
		w := when
		c := -1
		for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
			if tw.when < w {
				w = tw.when
				c = leftChild + j
			}
		}
		if c < 0 {
			break
		}
		// Pull the earliest child up and keep descending from its slot.
		heap[i] = heap[c]
		i = c
	}
	// Only write tw back if it actually moved.
	if heap[i].timer != tw.timer {
		heap[i] = tw
	}
}

// initHeap reestablishes the heap order in the slice ts.heap.
// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
func (ts *timers) initHeap() {
	// Last possible element that needs sifting down is parent of last element;
	// last element is len(t)-1; parent of last element is (len(t)-1-1)/timerHeapN.
	if len(ts.heap) <= 1 {
		return
	}
	for i := int(uint(len(ts.heap)-1-1) / timerHeapN); i >= 0; i-- {
		ts.siftDown(i)
	}
}

// badTimer is called if the timer data structures have been corrupted,
// presumably due to racy use by the program. We panic here rather than
// panicking due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
	throw("timer data corruption")
}

// Timer channels.

// maybeRunChan checks whether the timer needs to run
// to send a value to its associated channel. If so, it does.
// The timer must not be locked.
func (t *timer) maybeRunChan() {
	if t.astate.Load()&timerHeaped != 0 {
		// If the timer is in the heap, the ordinary timer code
		// is in charge of sending when appropriate.
		return
	}

	t.lock()
	now := nanotime()
	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
		t.trace("maybeRunChan-")
		// Timer in the heap, or not running at all, or not triggered.
		t.unlock()
		return
	}
	t.trace("maybeRunChan+")
	// unlockAndRun is a //go:systemstack function; switch stacks to call it.
	systemstack(func() {
		t.unlockAndRun(now)
	})
}

// blockTimerChan is called when a channel op has decided to block on c.
// The caller holds the channel lock for c and possibly other channels.
// blockTimerChan makes sure that c is in a timer heap,
// adding it if needed.
func blockTimerChan(c *hchan) {
	t := c.timer
	t.lock()
	t.trace("blockTimerChan")
	if !t.isChan {
		badTimer()
	}

	t.blocked++

	// If this is the first enqueue after a recent dequeue,
	// the timer may still be in the heap but marked as a zombie.
	// Unmark it in this case, if the timer is still pending.
	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
		t.state &^= timerZombie
		t.ts.zombies.Add(-1)
	}

	// t.maybeAdd must be called with t unlocked,
	// because it needs to lock t.ts before t.
	// Then it will do nothing if t.needsAdd(state) is false.
	// Check that now before the unlock,
	// avoiding the extra lock-lock-unlock-unlock
	// inside maybeAdd when t does not need to be added.
	add := t.needsAdd()
	t.unlock()
	if add {
		t.maybeAdd()
	}
}

// unblockTimerChan is called when a channel op that was blocked on c
// is no longer blocked. Every call to blockTimerChan must be paired with
// a call to unblockTimerChan.
// The caller holds the channel lock for c and possibly other channels.
// unblockTimerChan removes c from the timer heap when nothing is
// blocked on it anymore.
func unblockTimerChan(c *hchan) {
	t := c.timer
	t.lock()
	t.trace("unblockTimerChan")
	if !t.isChan || t.blocked == 0 {
		badTimer()
	}
	t.blocked--
	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
		// Last goroutine that was blocked on this timer.
		// Mark for removal from heap but do not clear t.when,
		// so that we know what time it is still meant to trigger.
		t.state |= timerZombie
		t.ts.zombies.Add(1)
	}
	t.unlock()
}