Source file src/runtime/sema.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Semaphore implementation exposed to Go.
     6  // Intended use is provide a sleep and wakeup
     7  // primitive that can be used in the contended case
     8  // of other synchronization primitives.
     9  // Thus it targets the same goal as Linux's futex,
    10  // but it has much simpler semantics.
    11  //
    12  // That is, don't think of these as semaphores.
    13  // Think of them as a way to implement sleep and wakeup
    14  // such that every sleep is paired with a single wakeup,
    15  // even if, due to races, the wakeup happens before the sleep.
    16  //
    17  // See Mullender and Cox, ``Semaphores in Plan 9,''
    18  // https://swtch.com/semaphore.pdf
    19  
    20  package runtime
    21  
    22  import (
    23  	"internal/cpu"
    24  	"internal/runtime/atomic"
    25  	"unsafe"
    26  )
    27  
    28  // Asynchronous semaphore for sync.Mutex.
    29  
    30  // A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem).
    31  // Each of those sudog may in turn point (through s.waitlink) to a list
    32  // of other sudogs waiting on the same address.
    33  // The operations on the inner lists of sudogs with the same address
    34  // are all O(1). The scanning of the top-level semaRoot list is O(log n),
    35  // where n is the number of distinct addresses with goroutines blocked
    36  // on them that hash to the given semaRoot.
    37  // See golang.org/issue/17953 for a program that worked badly
    38  // before we introduced the second level of list, and
    39  // BenchmarkSemTable/OneAddrCollision/* for a benchmark that exercises this.
type semaRoot struct {
	lock  mutex         // guards treap; queue and dequeue run with it held (see semacquire1/semrelease1).
	treap *sudog        // root of balanced tree of unique waiters.
	nwait atomic.Uint32 // Number of waiters. Read w/o the lock.
}
    45  
// semtable is the global table of semaphore roots, indexed by a hash
// of the semaphore address (see semTable.rootFor).
var semtable semTable

// Prime to not correlate with any user patterns.
const semTabSize = 251

type semTable [semTabSize]struct {
	root semaRoot
	// pad rounds each entry up to cpu.CacheLinePadSize bytes so that
	// adjacent roots do not share a cache line (avoids false sharing).
	pad  [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
}
    55  
    56  func (t *semTable) rootFor(addr *uint32) *semaRoot {
    57  	return &t[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
    58  }
    59  
    60  // sync_runtime_Semacquire should be an internal detail,
    61  // but widely used packages access it using linkname.
    62  // Notable members of the hall of shame include:
    63  //   - gvisor.dev/gvisor
    64  //   - github.com/sagernet/gvisor
    65  //
    66  // Do not remove or change the type signature.
    67  // See go.dev/issue/67401.
    68  //
    69  //go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	// FIFO queueing (lifo=false), block profiling only, no extra skipped frames.
	semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
}
    73  
    74  //go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire
func poll_runtime_Semacquire(addr *uint32) {
	// Same profile/queueing choices as sync.runtime_Semacquire:
	// FIFO queueing, block profiling only.
	semacquire1(addr, false, semaBlockProfile, 0, waitReasonSemacquire)
}
    78  
    79  // sync_runtime_Semrelease should be an internal detail,
    80  // but widely used packages access it using linkname.
    81  // Notable members of the hall of shame include:
    82  //   - gvisor.dev/gvisor
    83  //   - github.com/sagernet/gvisor
    84  //
    85  // Do not remove or change the type signature.
    86  // See go.dev/issue/67401.
    87  //
    88  //go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
	// handoff requests a direct handoff of the semaphore (and the P)
	// to the woken waiter; see semrelease1.
	semrelease1(addr, handoff, skipframes)
}
    92  
    93  //go:linkname internal_sync_runtime_SemacquireMutex internal/sync.runtime_SemacquireMutex
func internal_sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) {
	// Mutex waits are recorded in both the block and mutex-contention profiles.
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncMutexLock)
}
    97  
    98  //go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR
func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int) {
	// Same as the mutex acquire, but reports the RWMutex read-lock wait reason.
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexRLock)
}
   102  
   103  //go:linkname sync_runtime_SemacquireRWMutex sync.runtime_SemacquireRWMutex
func sync_runtime_SemacquireRWMutex(addr *uint32, lifo bool, skipframes int) {
	// Same as the mutex acquire, but reports the RWMutex write-lock wait reason.
	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes, waitReasonSyncRWMutexLock)
}
   107  
   108  //go:linkname sync_runtime_SemacquireWaitGroup sync.runtime_SemacquireWaitGroup
   109  func sync_runtime_SemacquireWaitGroup(addr *uint32, synctestDurable bool) {
   110  	reason := waitReasonSyncWaitGroupWait
   111  	if synctestDurable {
   112  		reason = waitReasonSynctestWaitGroupWait
   113  	}
   114  	semacquire1(addr, false, semaBlockProfile, 0, reason)
   115  }
   116  
   117  //go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease
func poll_runtime_Semrelease(addr *uint32) {
	// No direct handoff, no extra skipped frames.
	semrelease(addr)
}
   121  
   122  //go:linkname internal_sync_runtime_Semrelease internal/sync.runtime_Semrelease
func internal_sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
	// handoff requests a direct handoff to the woken waiter; see semrelease1.
	semrelease1(addr, handoff, skipframes)
}
   126  
// readyWithTime marks s's goroutine runnable. If the sleeper armed
// profiling (releasetime set to -1 when the relevant profile rate was
// nonzero), the wakeup time is recorded in s.releasetime first.
func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}
   133  
// semaProfileFlags selects which profiles a semaphore acquire records.
type semaProfileFlags int

const (
	semaBlockProfile semaProfileFlags = 1 << iota // record the wait in the block profile
	semaMutexProfile                              // record contention in the mutex profile
)
   140  
// Called from runtime.
func semacquire(addr *uint32) {
	// Runtime-internal waits request no profiling (profile flags = 0).
	semacquire1(addr, false, 0, 0, waitReasonSemacquire)
}
   145  
// semacquire1 is the semaphore acquire implementation: it atomically
// decrements *addr if it is positive, and otherwise enqueues the calling
// goroutine on the semaRoot for addr and parks until a release wakes it.
//
// lifo, if set, inserts the waiter at the front of its address's wait
// list instead of the tail (see semaRoot.queue). profile selects block
// and/or mutex profiling. skipframes adjusts the stack traces recorded
// for tracing and profiling. reason is reported while parked.
func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semtable.rootFor(addr)
	t0 := int64(0)
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	if profile&semaBlockProfile != 0 && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1 // sentinel: readyWithTime overwrites this with the wakeup time.
	}
	if profile&semaMutexProfile != 0 && mutexprofilerate > 0 {
		if t0 == 0 {
			t0 = cputicks()
		}
		s.acquiretime = t0
	}
	for {
		lockWithRank(&root.lock, lockRankRoot)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		root.nwait.Add(1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			root.nwait.Add(-1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s, lifo)
		goparkunlock(&root.lock, reason, traceBlockSync, 4+skipframes)
		// A nonzero ticket means a releaser handed the semaphore
		// directly to us (see semrelease1); otherwise re-race for it.
		if s.ticket != 0 || cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3+skipframes)
	}
	releaseSudog(s)
}
   202  
// semrelease is the runtime-internal release: no handoff, no skipped frames.
func semrelease(addr *uint32) {
	semrelease1(addr, false, 0)
}
   206  
// semrelease1 atomically increments *addr and, if any goroutines are
// waiting on it, dequeues and readies one. If handoff is true and the
// released count can be re-acquired immediately, the semaphore is
// passed directly to the woken waiter (by setting s.ticket = 1) and
// the current goroutine yields its P to it. skipframes adjusts the
// stack traces recorded for profiling.
func semrelease1(addr *uint32, handoff bool, skipframes int) {
	root := semtable.rootFor(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if root.nwait.Load() == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lockWithRank(&root.lock, lockRankRoot)
	if root.nwait.Load() == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s, t0, tailtime := root.dequeue(addr)
	if s != nil {
		root.nwait.Add(-1)
	}
	unlock(&root.lock)
	if s != nil { // May be slow or even yield, so unlock first
		acquiretime := s.acquiretime
		if acquiretime != 0 {
			// Charge contention that this (delayed) unlock caused.
			// If there are N more goroutines waiting beyond the
			// one that's waking up, charge their delay as well, so that
			// contention holding up many goroutines shows up as
			// more costly than contention holding up a single goroutine.
			// It would take O(N) time to calculate how long each goroutine
			// has been waiting, so instead we charge avg(head-wait, tail-wait)*N.
			// head-wait is the longest wait and tail-wait is the shortest.
			// (When we do a lifo insertion, we preserve this property by
			// copying the old head's acquiretime into the inserted new head.
			// In that case the overall average may be slightly high, but that's fine:
			// the average of the ends is only an approximation to the actual
			// average anyway.)
			// The root.dequeue above changed the head and tail acquiretime
			// to the current time, so the next unlock will not re-count this contention.
			dt0 := t0 - acquiretime
			dt := dt0
			if s.waiters != 0 {
				dtail := t0 - tailtime
				dt += (dtail + dt0) / 2 * int64(s.waiters)
			}
			mutexevent(dt, 3+skipframes)
		}
		if s.ticket != 0 {
			throw("corrupted semaphore ticket")
		}
		if handoff && cansemacquire(addr) {
			// Hand the just-released count straight to the waiter;
			// its semacquire1 loop sees ticket != 0 and returns.
			s.ticket = 1
		}
		readyWithTime(s, 5+skipframes)
		if s.ticket == 1 && getg().m.locks == 0 && getg() != getg().m.g0 {
			// Direct G handoff
			//
			// readyWithTime has added the waiter G as runnext in the
			// current P; we now call the scheduler so that we start running
			// the waiter G immediately.
			//
			// Note that waiter inherits our time slice: this is desirable
			// to avoid having a highly contended semaphore hog the P
			// indefinitely. goyield is like Gosched, but it emits a
			// "preempted" trace event instead and, more importantly, puts
			// the current G on the local runq instead of the global one.
			// We only do this in the starving regime (handoff=true), as in
			// the non-starving case it is possible for a different waiter
			// to acquire the semaphore while we are yielding/scheduling,
			// and this would be wasteful. We wait instead to enter starving
			// regime, and then we start to do direct handoffs of ticket and P.
			//
			// See issue 33747 for discussion.
			//
			// We don't handoff directly if we're holding locks or on the
			// system stack, since it's not safe to enter the scheduler.
			goyield()
		}
	}
}
   290  
   291  func cansemacquire(addr *uint32) bool {
   292  	for {
   293  		v := atomic.Load(addr)
   294  		if v == 0 {
   295  			return false
   296  		}
   297  		if atomic.Cas(addr, v, v-1) {
   298  			return true
   299  		}
   300  	}
   301  }
   302  
// queue adds s to the blocked goroutines in semaRoot.
// If a waiter for addr already exists, s joins that waiter's list:
// at the head (replacing the treap node) when lifo is true, at the
// tail otherwise. Otherwise s becomes a new treap leaf for addr.
// Called with root.lock held (see semacquire1).
func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
	s.g = getg()
	s.elem.set(unsafe.Pointer(addr))
	// Storing this pointer so that we can trace the semaphore address
	// from the blocked goroutine when checking for goroutine leaks.
	s.g.waiting = s
	s.next = nil
	s.prev = nil
	s.waiters = 0

	var last *sudog
	pt := &root.treap
	for t := *pt; t != nil; t = *pt {
		if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() {
			// Already have addr in list.
			if lifo {
				// Substitute s in t's place in treap.
				*pt = s
				s.ticket = t.ticket
				s.acquiretime = t.acquiretime // preserve head acquiretime as oldest time
				s.parent = t.parent
				s.prev = t.prev
				s.next = t.next
				if s.prev != nil {
					s.prev.parent = s
				}
				if s.next != nil {
					s.next.parent = s
				}
				// Add t first in s's wait list.
				s.waitlink = t
				s.waittail = t.waittail
				if s.waittail == nil {
					s.waittail = t
				}
				s.waiters = t.waiters
				// Saturating increment: never wrap waiters back to zero.
				if s.waiters+1 != 0 {
					s.waiters++
				}
				t.parent = nil
				t.prev = nil
				t.next = nil
				t.waittail = nil
			} else {
				// Add s to end of t's wait list.
				if t.waittail == nil {
					t.waitlink = s
				} else {
					t.waittail.waitlink = s
				}
				t.waittail = s
				s.waitlink = nil
				// Saturating increment: never wrap waiters back to zero.
				if t.waiters+1 != 0 {
					t.waiters++
				}
			}
			return
		}
		last = t
		if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() {
			pt = &t.prev
		} else {
			pt = &t.next
		}
	}

	// Add s as new leaf in tree of unique addrs.
	// The balanced tree is a treap using ticket as the random heap priority.
	// That is, it is a binary tree ordered according to the elem addresses,
	// but then among the space of possible binary trees respecting those
	// addresses, it is kept balanced on average by maintaining a heap ordering
	// on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket.
	// https://en.wikipedia.org/wiki/Treap
	// https://faculty.washington.edu/aragon/pubs/rst89.pdf
	//
	// s.ticket compared with zero in couple of places, therefore set lowest bit.
	// It will not affect treap's quality noticeably.
	s.ticket = cheaprand() | 1
	s.parent = last
	*pt = s

	// Rotate up into tree according to ticket (priority).
	for s.parent != nil && s.parent.ticket > s.ticket {
		if s.parent.prev == s {
			root.rotateRight(s.parent)
		} else {
			if s.parent.next != s {
				panic("semaRoot queue")
			}
			root.rotateLeft(s.parent)
		}
	}
}
   397  
// dequeue searches for and finds the first goroutine
// in semaRoot blocked on addr.
// If the sudog was being profiled, dequeue returns the time
// at which it was woken up as now. Otherwise now is 0.
// If there are additional entries in the wait list, dequeue
// returns tailtime set to the last entry's acquiretime.
// Otherwise tailtime is found.acquiretime.
// Called with root.lock held (see semrelease1).
func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) {
	ps := &root.treap
	s := *ps

	// Binary search of the treap by address.
	for ; s != nil; s = *ps {
		if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() {
			goto Found
		}

		if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() {
			ps = &s.prev
		} else {
			ps = &s.next
		}
	}
	return nil, 0, 0

Found:
	now = int64(0)
	if s.acquiretime != 0 {
		now = cputicks()
	}
	if t := s.waitlink; t != nil {
		// Substitute t, also waiting on addr, for s in root tree of unique addrs.
		*ps = t
		t.ticket = s.ticket
		t.parent = s.parent
		t.prev = s.prev
		if t.prev != nil {
			t.prev.parent = t
		}
		t.next = s.next
		if t.next != nil {
			t.next.parent = t
		}
		if t.waitlink != nil {
			t.waittail = s.waittail
		} else {
			t.waittail = nil
		}
		t.waiters = s.waiters
		if t.waiters > 1 {
			t.waiters--
		}
		// Set head and tail acquire time to 'now',
		// because the caller will take care of charging
		// the delays before now for all entries in the list.
		t.acquiretime = now
		tailtime = s.waittail.acquiretime
		s.waittail.acquiretime = now
		s.waitlink = nil
		s.waittail = nil
	} else {
		// Rotate s down to be leaf of tree for removal, respecting priorities.
		for s.next != nil || s.prev != nil {
			if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket {
				root.rotateRight(s)
			} else {
				root.rotateLeft(s)
			}
		}
		// Remove s, now a leaf.
		if s.parent != nil {
			if s.parent.prev == s {
				s.parent.prev = nil
			} else {
				s.parent.next = nil
			}
		} else {
			root.treap = nil
		}
		tailtime = s.acquiretime
	}
	// Goroutine is no longer blocked. Clear the waiting pointer.
	s.g.waiting = nil
	s.parent = nil
	s.elem.set(nil)
	s.next = nil
	s.prev = nil
	s.ticket = 0
	return s, now, tailtime
}
   487  
// rotateLeft rotates the tree rooted at node x.
// turning (x a (y b c)) into (y (x a b) c).
// Child, parent, and (if x was the root) root.treap links are all updated.
func (root *semaRoot) rotateLeft(x *sudog) {
	// p -> (x a (y b c))
	p := x.parent
	y := x.next
	b := y.prev

	y.prev = x
	x.parent = y
	x.next = b
	if b != nil {
		b.parent = x
	}

	y.parent = p
	if p == nil {
		root.treap = y
	} else if p.prev == x {
		p.prev = y
	} else {
		if p.next != x {
			throw("semaRoot rotateLeft")
		}
		p.next = y
	}
}
   515  
// rotateRight rotates the tree rooted at node y.
// turning (y (x a b) c) into (x a (y b c)).
// Child, parent, and (if y was the root) root.treap links are all updated.
func (root *semaRoot) rotateRight(y *sudog) {
	// p -> (y (x a b) c)
	p := y.parent
	x := y.prev
	b := x.next

	x.next = y
	y.parent = x
	y.prev = b
	if b != nil {
		b.parent = y
	}

	x.parent = p
	if p == nil {
		root.treap = x
	} else if p.prev == y {
		p.prev = x
	} else {
		if p.next != y {
			throw("semaRoot rotateRight")
		}
		p.next = x
	}
}
   543  
// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait atomic.Uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters.
	// head/tail form a FIFO linked through sudog.next, guarded by lock.
	lock mutex
	head *sudog
	tail *sudog
}
   566  
   567  // less checks if a < b, considering a & b running counts that may overflow the
   568  // 32-bit range, and that their "unwrapped" difference is always less than 2^31.
   569  func less(a, b uint32) bool {
   570  	return int32(a-b) < 0
   571  }
   572  
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	// Add(1)-1 yields the pre-increment value: this caller's ticket.
	return l.wait.Add(1) - 1
}
   583  
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lockWithRank(&l.lock, lockRankNotifyList)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	// Storing this pointer so that we can trace the condvar address
	// from the blocked goroutine when checking for goroutine leaks.
	s.elem.set(unsafe.Pointer(l))
	s.g.waiting = s
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1 // sentinel: readyWithTime overwrites this with the wakeup time.
	}
	// Append s at the tail of the FIFO list of parked waiters.
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, waitReasonSyncCondWait, traceBlockCondWait, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	// Goroutine is no longer blocked. Clear up its waiting pointer,
	// and clean up the sudog before releasing it.
	s.g.waiting = nil
	s.elem.set(nil)
	releaseSudog(s)
}
   627  
// notifyListNotifyAll notifies all entries in the list.
//
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if l.wait.Load() == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lockWithRank(&l.lock, lockRankNotifyList)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, l.wait.Load())
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		// Waking a synctest-bubbled goroutine from outside its bubble
		// is a fatal error.
		if s.g.bubble != nil && getg().bubble != s.g.bubble {
			println("semaphore wake of synctest goroutine", s.g.goid, "from outside bubble")
			fatal("semaphore wake of synctest goroutine from outside bubble")
		}
		readyWithTime(s, 4)
		s = next
	}
}
   664  
// notifyListNotifyOne notifies one entry in the list.
//
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if l.wait.Load() == atomic.Load(&l.notify) {
		return
	}

	lockWithRank(&l.lock, lockRankNotifyList)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == l.wait.Load() {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number.
	atomic.Store(&l.notify, t+1)

	// Try to find the g that needs to be notified.
	// If it hasn't made it to the list yet we won't find it,
	// but it won't park itself once it sees the new notify number.
	//
	// This scan looks linear but essentially always stops quickly.
	// Because g's queue separately from taking numbers,
	// there may be minor reorderings in the list, but we
	// expect the g we're looking for to be near the front.
	// The g has others in front of it on the list only to the
	// extent that it lost the race, so the iteration will not
	// be too long. This applies even when the g is missing:
	// it hasn't yet gotten to sleep and has lost the race to
	// the (few) other g's that we find on the list.
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			// Unlink s from the singly-linked list (p is its predecessor).
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			// Waking a synctest-bubbled goroutine from outside its bubble
			// is a fatal error.
			if s.g.bubble != nil && getg().bubble != s.g.bubble {
				println("semaphore wake of synctest goroutine", s.g.goid, "from outside bubble")
				fatal("semaphore wake of synctest goroutine from outside bubble")
			}
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}
   723  
   724  //go:linkname notifyListCheck sync.runtime_notifyListCheck
   725  func notifyListCheck(sz uintptr) {
   726  	if sz != unsafe.Sizeof(notifyList{}) {
   727  		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
   728  		throw("bad notifyList size")
   729  	}
   730  }
   731  
// internal_sync_nanotime exposes the runtime's nanotime clock to internal/sync.
//
//go:linkname internal_sync_nanotime internal/sync.runtime_nanotime
func internal_sync_nanotime() int64 {
	return nanotime()
}
   736  

View as plain text