Source file src/runtime/netpoll.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix || (js && wasm) || wasip1 || windows

package runtime

import (
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue/port/AIX/Windows)
// must define the following functions:
//
// func netpollinit()
//     Initialize the poller. Only called once.
//
// func netpollopen(fd uintptr, pd *pollDesc) int32
//     Arm edge-triggered notifications for fd. The pd argument is to pass
//     back to netpollready when fd is ready. Return an errno value.
//
// func netpollclose(fd uintptr) int32
//     Disable notifications for fd. Return an errno value.
//
// func netpoll(delta int64) (gList, int32)
//     Poll the network. If delta < 0, block indefinitely. If delta == 0,
//     poll without blocking. If delta > 0, block for up to delta nanoseconds.
//     Return a list of goroutines built by calling netpollready,
//     and a delta to add to netpollWaiters when all goroutines are ready.
//     This will never return an empty list with a non-zero delta.
//
// func netpollBreak()
//     Wake up the network poller, assumed to be blocked in netpoll.
//
// func netpollIsPollDescriptor(fd uintptr) bool
//     Reports whether fd is a file descriptor used by the poller.

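// For orientation, a platform implementation's netpoll typically drains its
// event source and hands each ready descriptor to netpollready, along these
// lines (an illustrative sketch, not any particular platform's code):
//
//	func netpoll(delta int64) (gList, int32) {
//		var toRun gList
//		d := int32(0)
//		// ... wait for events for up to delta nanoseconds ...
//		// for each fd reported ready with mode 'r', 'w', or 'r'+'w':
//		//	d += netpollready(&toRun, pd, mode)
//		return toRun, d
//	}
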
// Error codes returned by runtime_pollReset and runtime_pollWait.
// These must match the values in internal/poll/fd_poll_runtime.go.
const (
	pollNoError        = 0 // no error
	pollErrClosing     = 1 // descriptor is closed
	pollErrTimeout     = 2 // I/O timeout
	pollErrNotPollable = 3 // general error polling descriptor
)

// pollDesc contains two binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. Each semaphore can be in the following states:
//
//	pdReady - io readiness notification is pending;
//	          a goroutine consumes the notification by changing the state to pdNil.
//	pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
//	         the goroutine commits to park by changing the state to G pointer,
//	         or, alternatively, concurrent io notification changes the state to pdReady,
//	         or, alternatively, concurrent timeout/close changes the state to pdNil.
//	G pointer - the goroutine is blocked on the semaphore;
//	            io notification or timeout/close changes the state to pdReady or pdNil respectively
//	            and unparks the goroutine.
//	pdNil - none of the above.
const (
	pdNil   uintptr = 0
	pdReady uintptr = 1
	pdWait  uintptr = 2
)

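// Summarizing the transitions above (an illustrative digest; the
// authoritative logic lives in netpollblock and netpollunblock):
//
//	pdNil   -> pdWait    netpollblock prepares to park
//	pdWait  -> G pointer netpollblockcommit parks the goroutine
//	pdWait  -> pdReady   concurrent io notification arrives
//	pdWait  -> pdNil     concurrent timeout/close
//	G ptr   -> pdReady   io notification; the goroutine is unparked
//	G ptr   -> pdNil     timeout/close; the goroutine is unparked
//	pdReady -> pdNil     the waiter consumes the notification
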
const pollBlockSize = 4 * 1024

// Network poller descriptor.
//
// No heap pointers.
type pollDesc struct {
	_     sys.NotInHeap
	link  *pollDesc      // in pollcache, protected by pollcache.lock
	fd    uintptr        // constant for pollDesc usage lifetime
	fdseq atomic.Uintptr // protects against stale pollDesc

	// atomicInfo holds bits from closing, rd, and wd,
	// which are only ever written while holding the lock,
	// summarized for use by netpollcheckerr,
	// which cannot acquire the lock.
	// After writing these fields under lock in a way that
	// might change the summary, code must call publishInfo
	// before releasing the lock.
	// Code that changes fields and then calls netpollunblock
	// (while still holding the lock) must call publishInfo
	// before calling netpollunblock, because publishInfo is what
	// stops netpollblock from blocking anew
	// (by changing the result of netpollcheckerr).
	// atomicInfo also holds the eventErr bit,
	// recording whether a poll event on the fd got an error;
	// atomicInfo is the only source of truth for that bit.
	atomicInfo atomic.Uint32 // atomic pollInfo

	// rg, wg are accessed atomically and hold g pointers.
	// (Using atomic.Uintptr here is similar to using guintptr elsewhere.)
	rg atomic.Uintptr // pdReady, pdWait, G waiting for read or pdNil
	wg atomic.Uintptr // pdReady, pdWait, G waiting for write or pdNil

	lock    mutex // protects the following fields
	closing bool
	rrun    bool      // whether rt is running
	wrun    bool      // whether wt is running
	user    uint32    // user settable cookie
	rseq    uintptr   // protects from stale read timers
	rt      timer     // read deadline timer
	rd      int64     // read deadline (a nanotime in the future, -1 when expired)
	wseq    uintptr   // protects from stale write timers
	wt      timer     // write deadline timer
	wd      int64     // write deadline (a nanotime in the future, -1 when expired)
	self    *pollDesc // storage for indirect interface. See (*pollDesc).makeArg.
}

// pollInfo is the bits needed by netpollcheckerr, stored atomically,
// mostly duplicating state that is manipulated under lock in pollDesc.
// The one exception is the pollEventErr bit, which is maintained only
// in the pollInfo.
type pollInfo uint32

const (
	pollClosing = 1 << iota
	pollEventErr
	pollExpiredReadDeadline
	pollExpiredWriteDeadline
	pollFDSeq // 20-bit field, low 20 bits of fdseq field
)

const (
	pollFDSeqBits = 20                   // number of bits in pollFDSeq
	pollFDSeqMask = 1<<pollFDSeqBits - 1 // mask for pollFDSeq
)

func (i pollInfo) closing() bool              { return i&pollClosing != 0 }
func (i pollInfo) eventErr() bool             { return i&pollEventErr != 0 }
func (i pollInfo) expiredReadDeadline() bool  { return i&pollExpiredReadDeadline != 0 }
func (i pollInfo) expiredWriteDeadline() bool { return i&pollExpiredWriteDeadline != 0 }

// info returns the pollInfo corresponding to pd.
func (pd *pollDesc) info() pollInfo {
	return pollInfo(pd.atomicInfo.Load())
}

// publishInfo updates pd.atomicInfo (returned by pd.info)
// using the other values in pd.
// It must be called while holding pd.lock,
// and it must be called after changing anything
// that might affect the info bits.
// In practice this means after changing closing
// or changing rd or wd from < 0 to >= 0.
func (pd *pollDesc) publishInfo() {
	var info uint32
	if pd.closing {
		info |= pollClosing
	}
	if pd.rd < 0 {
		info |= pollExpiredReadDeadline
	}
	if pd.wd < 0 {
		info |= pollExpiredWriteDeadline
	}
	info |= uint32(pd.fdseq.Load()&pollFDSeqMask) << pollFDSeq

	// Set all of x except the pollEventErr bit.
	x := pd.atomicInfo.Load()
	for !pd.atomicInfo.CompareAndSwap(x, (x&pollEventErr)|info) {
		x = pd.atomicInfo.Load()
	}
}

// setEventErr sets the result of pd.info().eventErr() to b.
// We only change the error bit if seq == 0 or if seq matches pollFDSeq
// (issue #59545).
func (pd *pollDesc) setEventErr(b bool, seq uintptr) {
	mSeq := uint32(seq & pollFDSeqMask)
	x := pd.atomicInfo.Load()
	xSeq := (x >> pollFDSeq) & pollFDSeqMask
	if seq != 0 && xSeq != mSeq {
		return
	}
	for (x&pollEventErr != 0) != b && !pd.atomicInfo.CompareAndSwap(x, x^pollEventErr) {
		x = pd.atomicInfo.Load()
		xSeq := (x >> pollFDSeq) & pollFDSeqMask
		if seq != 0 && xSeq != mSeq {
			return
		}
	}
}

type pollCache struct {
	lock  mutex
	first *pollDesc
	// PollDesc objects must be type-stable,
	// because we can get a ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using the seq field,
	// which is incremented when deadlines are changed or the descriptor is reused.
}

var (
	netpollInitLock mutex
	netpollInited   atomic.Uint32

	pollcache      pollCache
	netpollWaiters atomic.Uint32
)

//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit() {
	netpollGenericInit()
}

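// netpollGenericInit initializes the platform poller at most once.
// It uses double-checked locking: a fast atomic load of netpollInited,
// then a recheck under netpollInitLock before calling netpollinit.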
func netpollGenericInit() {
	if netpollInited.Load() == 0 {
		lockInit(&netpollInitLock, lockRankNetpollInit)
		lockInit(&pollcache.lock, lockRankPollCache)
		lock(&netpollInitLock)
		if netpollInited.Load() == 0 {
			netpollinit()
			netpollInited.Store(1)
		}
		unlock(&netpollInitLock)
	}
}

func netpollinited() bool {
	return netpollInited.Load() != 0
}

//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor

// poll_runtime_isPollServerDescriptor reports whether fd is a
// descriptor being used by netpoll.
func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
	return netpollIsPollDescriptor(fd)
}

//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on free polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on free polldesc")
	}
	pd.fd = fd
	if pd.fdseq.Load() == 0 {
		// The value 0 is special in setEventErr, so don't use it.
		pd.fdseq.Store(1)
	}
	pd.closing = false
	pd.setEventErr(false, 0)
	pd.rseq++
	pd.rg.Store(pdNil)
	pd.rd = 0
	pd.wseq++
	pd.wg.Store(pdNil)
	pd.wd = 0
	pd.self = pd
	pd.publishInfo()
	unlock(&pd.lock)

	errno := netpollopen(fd, pd)
	if errno != 0 {
		pollcache.free(pd)
		return nil, int(errno)
	}
	return pd, 0
}

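// On the caller's side, internal/poll reaches poll_runtime_pollOpen through
// the linkname above; roughly (an approximate sketch of the internal/poll
// side in fd_poll_runtime.go, not a verbatim quote):
//
//	ctx, errno := runtime_pollOpen(uintptr(fd.Sysfd))
//	if errno != 0 {
//		return errnoErr(syscall.Errno(errno))
//	}
//	pd.runtimeCtx = ctx
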
//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
func poll_runtime_pollClose(pd *pollDesc) {
	if !pd.closing {
		throw("runtime: close polldesc w/o unblock")
	}
	wg := pd.wg.Load()
	if wg != pdNil && wg != pdReady {
		throw("runtime: blocked write on closing polldesc")
	}
	rg := pd.rg.Load()
	if rg != pdNil && rg != pdReady {
		throw("runtime: blocked read on closing polldesc")
	}
	netpollclose(pd.fd)
	pollcache.free(pd)
}

func (c *pollCache) free(pd *pollDesc) {
	// pd can't be shared here, but lock anyhow because
	// that's what publishInfo documents.
	lock(&pd.lock)

	// Increment the fdseq field, so that any currently
	// running netpoll calls will not mark pd as ready.
	fdseq := pd.fdseq.Load()
	fdseq = (fdseq + 1) & (1<<taggedPointerBits - 1)
	pd.fdseq.Store(fdseq)

	pd.publishInfo()

	unlock(&pd.lock)

	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}

// poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
// prepares a descriptor for polling in mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	if mode == 'r' {
		pd.rg.Store(pdNil)
	} else if mode == 'w' {
		pd.wg.Store(pdNil)
	}
	return pollNoError
}

// poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
// waits for a descriptor to be ready for reading or writing,
// according to mode, which is 'r' or 'w'.
// This returns an error code; the codes are defined above.
//
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
	errcode := netpollcheckerr(pd, int32(mode))
	if errcode != pollNoError {
		return errcode
	}
	// For now, only Solaris, illumos, AIX, and wasip1 use level-triggered IO.
	if GOOS == "solaris" || GOOS == "illumos" || GOOS == "aix" || GOOS == "wasip1" {
		netpollarm(pd, mode)
	}
	for !netpollblock(pd, int32(mode), false) {
		errcode = netpollcheckerr(pd, int32(mode))
		if errcode != pollNoError {
			return errcode
		}
		// This can happen if the timeout fired and unblocked us,
		// but the timeout was reset before we had a chance to run.
		// Pretend it did not happen and retry.
	}
	return pollNoError
}

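// The integer codes returned by poll_runtime_pollReset and
// poll_runtime_pollWait are mapped back to errors on the internal/poll side
// (convertErr in fd_poll_runtime.go), approximately:
//
//	pollNoError        -> nil
//	pollErrClosing     -> ErrNetClosing / ErrFileClosing
//	pollErrTimeout     -> ErrDeadlineExceeded
//	pollErrNotPollable -> ErrNotPollable
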
//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
	// This function is used only on Windows after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready and ignore closing or timeouts.
	for !netpollblock(pd, int32(mode), true) {
	}
}

//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		unlock(&pd.lock)
		return
	}
	rd0, wd0 := pd.rd, pd.wd
	combo0 := rd0 > 0 && rd0 == wd0
	if d > 0 {
		d += nanotime()
		if d <= 0 {
			// If the user has a deadline in the future, but the delay calculation
			// overflows, then set the deadline to the maximum possible value.
			d = 1<<63 - 1
		}
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	pd.publishInfo()
	combo := pd.rd > 0 && pd.rd == pd.wd
	rtf := netpollReadDeadline
	if combo {
		rtf = netpollDeadline
	}
	if !pd.rrun {
		if pd.rd > 0 {
			// Copy current seq into the timer arg.
			// The timer func will check the seq against the current descriptor seq;
			// if they differ, the descriptor was reused or timers were reset.
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
			pd.rrun = true
		}
	} else if pd.rd != rd0 || combo != combo0 {
		pd.rseq++ // invalidate current timers
		if pd.rd > 0 {
			pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq)
		} else {
			pd.rt.stop()
			pd.rrun = false
		}
	}
	if !pd.wrun {
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
			pd.wrun = true
		}
	} else if pd.wd != wd0 || combo != combo0 {
		pd.wseq++ // invalidate current timers
		if pd.wd > 0 && !combo {
			pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq)
		} else {
			pd.wt.stop()
			pd.wrun = false
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	// Note that pd.publishInfo has already been called, above, immediately after modifying rd and wd.
	delta := int32(0)
	var rg, wg *g
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}

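// Design note: when the read and write deadlines coincide (the combo case),
// a single timer running netpollDeadline serves both directions; otherwise
// rt and wt run netpollReadDeadline and netpollWriteDeadline independently.
// Bumping rseq/wseq invalidates timers that may already be firing.
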
//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
func poll_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("runtime: unblock on closing polldesc")
	}
	pd.closing = true
	pd.rseq++
	pd.wseq++
	var rg, wg *g
	pd.publishInfo()
	delta := int32(0)
	rg = netpollunblock(pd, 'r', false, &delta)
	wg = netpollunblock(pd, 'w', false, &delta)
	if pd.rrun {
		pd.rt.stop()
		pd.rrun = false
	}
	if pd.wrun {
		pd.wt.stop()
		pd.wrun = false
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
	netpollAdjustWaiters(delta)
}

// netpollready is called by the platform-specific netpoll function.
// It declares that the fd associated with pd is ready for I/O.
// The toRun argument is used to build a list of goroutines to return
// from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
// whether the fd is ready for reading or writing or both.
//
// This returns a delta to apply to netpollWaiters.
//
// This may run while the world is stopped, so write barriers are not allowed.
//
//go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) int32 {
	delta := int32(0)
	var rg, wg *g
	if mode == 'r' || mode == 'r'+'w' {
		rg = netpollunblock(pd, 'r', true, &delta)
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg = netpollunblock(pd, 'w', true, &delta)
	}
	if rg != nil {
		toRun.push(rg)
	}
	if wg != nil {
		toRun.push(wg)
	}
	return delta
}

func netpollcheckerr(pd *pollDesc, mode int32) int {
	info := pd.info()
	if info.closing() {
		return pollErrClosing
	}
	if (mode == 'r' && info.expiredReadDeadline()) || (mode == 'w' && info.expiredWriteDeadline()) {
		return pollErrTimeout
	}
	// Report an event scanning error only on a read event.
	// An error on a write event will be captured in a subsequent
	// write call that is able to report a more specific error.
	if mode == 'r' && info.eventErr() {
		return pollErrNotPollable
	}
	return pollNoError
}

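// netpollblockcommit is the gopark commit function used by netpollblock.
// It publishes the parking g into the semaphore word; if the CAS from pdWait
// fails because a concurrent io notification or timeout/close changed the
// state, it returns false and gopark aborts the park.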
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
	r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
	if r {
		// Bump the count of goroutines waiting for the poller.
		// The scheduler uses this to decide whether to block
		// waiting for the poller if there is nothing else to do.
		netpollAdjustWaiters(1)
	}
	return r
}

func netpollgoready(gp *g, traceskip int) {
	goready(gp, traceskip+1)
}

// netpollblock returns true if IO is ready, or false if it timed out or was
// closed. If waitio is set, it waits only for completed IO and ignores errors.
// Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
// can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// Set the gpp semaphore to pdWait.
	for {
		// Consume notification if already ready.
		if gpp.CompareAndSwap(pdReady, pdNil) {
			return true
		}
		if gpp.CompareAndSwap(pdNil, pdWait) {
			break
		}

		// Double check that this isn't corrupt; otherwise we'd loop
		// forever.
		if v := gpp.Load(); v != pdReady && v != pdNil {
			throw("runtime: double wait")
		}
	}

	// Need to recheck error states after setting gpp to pdWait:
	// runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, publishInfo, load of rg/wg.
	if waitio || netpollcheckerr(pd, mode) == pollNoError {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), waitReasonIOWait, traceBlockNet, 5)
	}
	// Be careful not to lose a concurrent pdReady notification.
	old := gpp.Swap(pdNil)
	if old > pdWait {
		throw("runtime: corrupted polldesc")
	}
	return old == pdReady
}

// netpollunblock moves either pd.rg (if mode == 'r') or
// pd.wg (if mode == 'w') into the pdReady state.
// This returns any goroutine blocked on pd.{rg,wg}.
// It adds any adjustment to netpollWaiters to *delta;
// this adjustment should be applied after the goroutine has
// been marked ready.
func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	for {
		old := gpp.Load()
		if old == pdReady {
			return nil
		}
		if old == pdNil && !ioready {
			// Only set pdReady for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		new := pdNil
		if ioready {
			new = pdReady
		}
		if gpp.CompareAndSwap(old, new) {
			if old == pdWait {
				old = pdNil
			} else if old != pdNil {
				*delta -= 1
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}

func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// The seq arg is the descriptor's seq value from when the timer was set.
	// If it is stale, ignore the timer event.
	currentSeq := pd.rseq
	if !read {
		currentSeq = pd.wseq
	}
	if seq != currentSeq {
		// The descriptor was reused or timers were reset.
		unlock(&pd.lock)
		return
	}
	delta := int32(0)
	var rg *g
	if read {
		if pd.rd <= 0 || !pd.rrun {
			throw("runtime: inconsistent read deadline")
		}
		pd.rd = -1
		pd.publishInfo()
		rg = netpollunblock(pd, 'r', false, &delta)
	}
	var wg *g
	if write {
		if pd.wd <= 0 || !pd.wrun && !read {
			throw("runtime: inconsistent write deadline")
		}
		pd.wd = -1
		pd.publishInfo()
		wg = netpollunblock(pd, 'w', false, &delta)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 0)
	}
	if wg != nil {
		netpollgoready(wg, 0)
	}
	netpollAdjustWaiters(delta)
}

func netpollDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}

func netpollReadDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}

func netpollWriteDeadline(arg any, seq uintptr, delta int64) {
	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}

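// The three callbacks above are installed by poll_runtime_pollSetDeadline
// via pd.rt.modify and pd.wt.modify. The runtime timer passes back the seq
// it was armed with, and netpolldeadlineimpl compares it against the
// descriptor's current rseq/wseq to discard stale timer events.
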
// netpollAnyWaiters reports whether any goroutines are waiting for I/O.
func netpollAnyWaiters() bool {
	return netpollWaiters.Load() > 0
}

// netpollAdjustWaiters adds delta to netpollWaiters.
func netpollAdjustWaiters(delta int32) {
	if delta != 0 {
		netpollWaiters.Add(delta)
	}
}

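// The scheduler consults netpollAnyWaiters (in findRunnable) when deciding
// whether to block in netpoll when it has nothing else to do, so the waiter
// count must be kept exact via netpollAdjustWaiters, using the deltas
// returned by netpollready and netpollunblock and the increment made in
// netpollblockcommit.
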
func (c *pollCache) alloc() *pollDesc {
	lock(&c.lock)
	if c.first == nil {
		const pdSize = unsafe.Sizeof(pollDesc{})
		n := pollBlockSize / pdSize
		if n == 0 {
			n = 1
		}
		// Must be in non-GC memory because it can be referenced
		// only from epoll/kqueue internals.
		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
		for i := uintptr(0); i < n; i++ {
			pd := (*pollDesc)(add(mem, i*pdSize))
			lockInit(&pd.lock, lockRankPollDesc)
			pd.rt.init(nil, nil)
			pd.wt.init(nil, nil)
			pd.link = c.first
			c.first = pd
		}
	}
	pd := c.first
	c.first = pd.link
	unlock(&c.lock)
	return pd
}

// makeArg converts pd to an interface{}.
// makeArg does not do any allocation. Normally, such
// a conversion requires an allocation because pointers to
// types which embed runtime/internal/sys.NotInHeap (which pollDesc is)
// must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any) {
	x := (*eface)(unsafe.Pointer(&i))
	x._type = pdType
	x.data = unsafe.Pointer(&pd.self)
	return
}

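// The trick works because pointers to not-in-heap types must be stored in
// interfaces indirectly: pdType describes *pollDesc, and the data word points
// at pd.self, which in turn points back at pd. The pd.self field exists
// solely to give makeArg stable storage for that indirection.
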
var (
	pdEface any    = (*pollDesc)(nil)
	pdType  *_type = efaceOf(&pdEface)._type
)
