Source file src/runtime/lock_spinbit.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !wasm

package runtime

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
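//
// A rough sketch of how lock2 and unlock2Wake below use that contract (the
// spin loop and the state-word CAS that publishes the push are omitted):
//
//	// in lock2, before sleeping:
//	semacreate(gp.m)
//	gp.m.mWaitList.next = mutexWaitListHead(v) // link to current head of wait list
//	semasleep(-1)                              // block until a later unlock wakes us
//
//	// in unlock2Wake, after popping a sleeping M (wakem) off the wait list:
//	semawakeup(wakem)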

// The mutex state consists of four flags and a pointer. The flag at bit 0,
// mutexLocked, represents the lock itself. Bit 1, mutexSleeping, is a hint that
// the pointer is non-nil. The fast paths for locking and unlocking the mutex
// are based on atomic 8-bit swap operations on the low byte; bits 2 through 7
// are unused.
//
// Bit 8, mutexSpinning, is a try-lock that grants a waiting M permission to
// spin on the state word. Most other Ms must attempt to spend their time
// sleeping to reduce traffic on the cache line. This is the "spin bit" for
// which the implementation is named. (The anti-starvation mechanism also grants
// temporary permission for an M to spin.)
//
// Bit 9, mutexStackLocked, is a try-lock that grants an unlocking M permission
// to inspect the list of waiting Ms and to pop an M off of that stack.
//
// The upper bits hold a (partial) pointer to the M that most recently went to
// sleep. The sleeping Ms form a stack linked by their mWaitList.next fields.
// Because the fast paths use an 8-bit swap on the low byte of the state word,
// we'll need to reconstruct the full M pointer from the bits we have. Most Ms
// are allocated on the heap, and have a known alignment and base offset. (The
// offset is due to mallocgc's allocation headers.) The main program thread uses
// a static M value, m0. We check for m0 specifically and add a known offset
// otherwise.
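//
// Putting that together, the state word looks roughly like this (a summary of
// the description above, not an additional invariant):
//
//	bits 10 and up: high bits of the most recently pushed waiting M (or zero)
//	bit  9:         mutexStackLocked
//	bit  8:         mutexSpinning
//	bits 2..7:      unused
//	bit  1:         mutexSleeping
//	bit  0:         mutexLocked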

const (
	active_spin     = 4  // referenced in proc.go for sync.Mutex implementation
	active_spin_cnt = 30 // referenced in proc.go for sync.Mutex implementation
)

const (
	mutexLocked      = 0x001 // bit 0: the lock itself
	mutexSleeping    = 0x002 // bit 1: hint that Ms may be sleeping on the wait list
	mutexSpinning    = 0x100 // bit 8: try-lock granting permission to spin on the state word
	mutexStackLocked = 0x200 // bit 9: try-lock protecting pops from the wait list
	mutexMMask       = 0x3FF // low bits not used for the waiting M pointer
	mutexMOffset     = gc.MallocHeaderSize // alignment of heap-allocated Ms (those other than m0)

	mutexActiveSpinCount  = 4  // rounds of procyield-based spinning in lock2
	mutexActiveSpinSize   = 30 // argument to procyield for each of those rounds
	mutexPassiveSpinCount = 1  // additional rounds of osyield before sleeping

	mutexTailWakePeriod = 16 // wake the tail waiter about once per this many wakes, to avoid starvation
)

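// key8 returns a pointer to the least-significant byte of the word at p. lock2
// and unlock2 use it to apply their Xchg8-based fast paths to the low byte of
// the mutex state word.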
//go:nosplit
func key8(p *uintptr) *uint8 {
	if goarch.BigEndian {
		return &(*[8]uint8)(unsafe.Pointer(p))[goarch.PtrSize/1-1]
	}
	return &(*[8]uint8)(unsafe.Pointer(p))[0]
}

// mWaitList is part of the M struct, and holds the list of Ms that are waiting
// for a particular runtime.mutex.
//
// When an M is unable to immediately obtain a lock, it adds itself to the list
// of Ms waiting for the lock. It does that via this struct's next field,
// forming a singly-linked list with the mutex's key field pointing to the head
// of the list.
type mWaitList struct {
	next       muintptr // next m waiting for lock
	startTicks int64    // when this m started waiting for the current lock holder, in cputicks
}

// lockVerifyMSize confirms that we can recreate the low bits of the M pointer.
func lockVerifyMSize() {
	size := roundupsize(unsafe.Sizeof(mPadded{}), false) + gc.MallocHeaderSize
	if size&mutexMMask != 0 {
		print("M structure uses sizeclass ", size, "/", hex(size), " bytes; ",
			"incompatible with mutex flag mask ", hex(mutexMMask), "\n")
		throw("runtime.m memory alignment too small for spinbit mutex")
	}
}

// mutexWaitListHead recovers a full muintptr that was missing its low bits.
// With the exception of the static m0 value, it requires allocating runtime.m
// values in a size class with a particular minimum alignment. The 2048-byte
// size class allows recovering the full muintptr value even after overwriting
// the low 11 bits with flags. We can use those 11 bits as 3 flags and an
// atomically-swapped byte.
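//
// For example (hypothetical 64-bit address, assuming an 8-byte
// gc.MallocHeaderSize): an M allocated at 0xc000104008 contributes high bits
// 0xc000104000 to the state word; since those bits don't match m0's, adding
// mutexMOffset recovers the original 0xc000104008.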
//
//go:nosplit
func mutexWaitListHead(v uintptr) muintptr {
	if highBits := v &^ mutexMMask; highBits == 0 {
		return 0
	} else if m0bits := muintptr(unsafe.Pointer(&m0)); highBits == uintptr(m0bits)&^mutexMMask {
		return m0bits
	} else {
		return muintptr(highBits + mutexMOffset)
	}
}

// mutexPreferLowLatency reports whether this mutex prefers low latency at the
// risk of performance collapse. If so, we can allow all waiting threads to spin
// on the state word rather than go to sleep.
//
// TODO: We could have the waiting Ms each spin on their own private cache line,
// especially if we can put a bound on the on-CPU time that would consume.
//
// TODO: If there's a small set of mutex values with special requirements, they
// could make use of a more specialized lock2/unlock2 implementation. Otherwise,
// we're constrained to what we can fit within a single uintptr with no
// additional storage on the M for each lock held.
//
//go:nosplit
func mutexPreferLowLatency(l *mutex) bool {
	switch l {
	default:
		return false
	case &sched.lock:
		// We often expect sched.lock to pass quickly between Ms in a way that
		// each M has unique work to do: for instance when we stop-the-world
		// (bringing each P to idle) or add new netpoller-triggered work to the
		// global run queue.
		return true
	}
}

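// mutexContended reports whether l's state word records any sleeping Ms, that
// is, whether other Ms are waiting for the lock.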
func mutexContended(l *mutex) bool {
	return atomic.Loaduintptr(&l.key)&^mutexMMask != 0
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	k8 := key8(&l.key)

	// Speculative grab for lock.
	v8 := atomic.Xchg8(k8, mutexLocked)
	if v8&mutexLocked == 0 {
		if v8&mutexSleeping != 0 {
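			// The Xchg8 above cleared the whole low byte, including the
			// mutexSleeping hint. Restore it so unlock2 still knows to take
			// its slow path and wake the sleeping Ms.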
			atomic.Or8(k8, mutexSleeping)
		}
		return
	}
	semacreate(gp.m)

	var startTime int64
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for mutexActiveSpinCount attempts.
	spin := 0
	if numCPUStartup > 1 {
		spin = mutexActiveSpinCount
	}

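	// weSpin: we hold the mutexSpinning try-lock and may poll the state word.
	// atTail: when we last woke, we were at the bottom of the wait stack (at
	// risk of starving), so we get temporary permission to spin.
	// haveTimers: we've recorded the start times for the contention profile
	// and the mutex wait metric.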
	var weSpin, atTail, haveTimers bool
	v := atomic.Loaduintptr(&l.key)
tryAcquire:
	for i := 0; ; i++ {
		if v&mutexLocked == 0 {
			if weSpin {
				next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked
				if next&^mutexMMask == 0 {
					// The fast-path Xchg8 may have cleared mutexSleeping. Fix
					// the hint so unlock2 knows when to use its slow path.
					next = next &^ mutexSleeping
				}
				if atomic.Casuintptr(&l.key, v, next) {
					gp.m.mLockProfile.end(startTime)
					return
				}
			} else {
				prev8 := atomic.Xchg8(k8, mutexLocked|mutexSleeping)
				if prev8&mutexLocked == 0 {
					gp.m.mLockProfile.end(startTime)
					return
				}
			}
			v = atomic.Loaduintptr(&l.key)
			continue tryAcquire
		}

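		// The lock is held. Try to claim the mutexSpinning try-lock, which
		// grants permission to stay on-CPU and poll the state word instead of
		// going to sleep right away.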
		if !weSpin && v&mutexSpinning == 0 && atomic.Casuintptr(&l.key, v, v|mutexSpinning) {
			v |= mutexSpinning
			weSpin = true
		}

		if weSpin || atTail || mutexPreferLowLatency(l) {
			if i < spin {
				procyield(mutexActiveSpinSize)
				v = atomic.Loaduintptr(&l.key)
				continue tryAcquire
			} else if i < spin+mutexPassiveSpinCount {
				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268.
				v = atomic.Loaduintptr(&l.key)
				continue tryAcquire
			}
		}

		// Go to sleep
		if v&mutexLocked == 0 {
			throw("runtime·lock: sleeping while lock is available")
		}

		// Collect times for mutex profile (seen in unlock2 only via mWaitList),
		// and for "/sync/mutex/wait/total:seconds" metric (to match).
		if !haveTimers {
			gp.m.mWaitList.startTicks = cputicks()
			startTime = gp.m.mLockProfile.start()
			haveTimers = true
		}
		// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field
		gp.m.mWaitList.next = mutexWaitListHead(v)

		// Pack a (partial) pointer to this M with the current lock state bits
		next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping
		if weSpin { // If we were spinning, prepare to retire
			next = next &^ mutexSpinning
		}

		if atomic.Casuintptr(&l.key, v, next) {
			weSpin = false
			// We've pushed ourselves onto the stack of waiters. Wait.
			semasleep(-1)
			atTail = gp.m.mWaitList.next == 0 // we were at risk of starving
			i = 0
		}

		gp.m.mWaitList.next = 0
		v = atomic.Loaduintptr(&l.key)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

// We might not be holding a p in this code.
//
//go:nowritebarrier
func unlock2(l *mutex) {
	gp := getg()

	var prev8 uint8
	var haveStackLock bool
	var endTicks int64
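	// endTicks is nonzero only if this unlock operation is sampled for the
	// mutex contention profile; it is the timestamp used in unlock2Wake to
	// charge the waiters' delay to this critical section.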
	if !mutexSampleContention() {
		// Not collecting a sample for the contention profile, do the quick release
		prev8 = atomic.Xchg8(key8(&l.key), 0)
	} else {
		// If there's contention, we'll sample it. Don't allow another
		// lock2/unlock2 pair to finish before us and take our blame. Prevent
		// that by trading for the stack lock with a CAS.
		v := atomic.Loaduintptr(&l.key)
		for {
			if v&^mutexMMask == 0 || v&mutexStackLocked != 0 {
				// No contention, or (stack lock unavailable) no way to calculate it
				prev8 = atomic.Xchg8(key8(&l.key), 0)
				endTicks = 0
				break
			}

			// There's contention, the stack lock appeared to be available, and
			// we'd like to collect a sample for the contention profile.
			if endTicks == 0 {
				// Read the time before releasing the lock. The profile will be
				// strictly smaller than what other threads would see by timing
				// their lock calls.
				endTicks = cputicks()
			}
			next := (v | mutexStackLocked) &^ (mutexLocked | mutexSleeping)
			if atomic.Casuintptr(&l.key, v, next) {
				haveStackLock = true
				prev8 = uint8(v)
				// The fast path of lock2 may have cleared mutexSleeping.
				// Restore it so we're sure to call unlock2Wake below.
				prev8 |= mutexSleeping
				break
			}
			v = atomic.Loaduintptr(&l.key)
		}
	}
	if prev8&mutexLocked == 0 {
		throw("unlock of unlocked lock")
	}

	if prev8&mutexSleeping != 0 {
		unlock2Wake(l, haveStackLock, endTicks)
	}

	gp.m.mLockProfile.store()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// mutexSampleContention returns whether the current mutex operation should
// report any contention it discovers.
func mutexSampleContention() bool {
	if rate := int64(atomic.Load64(&mutexprofilerate)); rate <= 0 {
		return false
	} else {
		// TODO: have SetMutexProfileFraction do the clamping
		rate32 := uint32(rate)
		if int64(rate32) != rate {
			rate32 = ^uint32(0)
		}
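		// cheaprandn(rate32) is uniformly distributed in [0, rate32), so
		// roughly one call in every mutexprofilerate is chosen to report the
		// contention it finds.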
		return cheaprandn(rate32) == 0
	}
}

// unlock2Wake updates the list of Ms waiting on l, waking an M if necessary.
//
//go:nowritebarrier
func unlock2Wake(l *mutex, haveStackLock bool, endTicks int64) {
	v := atomic.Loaduintptr(&l.key)

	// On occasion, seek out and wake the M at the bottom of the stack so it
	// doesn't starve.
	antiStarve := cheaprandn(mutexTailWakePeriod) == 0

	if haveStackLock {
		goto useStackLock
	}

	if !(antiStarve || // avoiding starvation may require a wake
		v&mutexSpinning == 0 || // no spinners means we must wake
		mutexPreferLowLatency(l)) { // prefer waiters be awake as much as possible
		return
	}

	for {
		if v&^mutexMMask == 0 || v&mutexStackLocked != 0 {
			// No waiting Ms means nothing to do.
			//
			// If the stack lock is unavailable, its owner would make the same
			// wake decisions that we would, so there's nothing for us to do.
			//
			// Although: This thread may have a different call stack, which
			// would result in a different entry in the mutex contention profile
			// (upon completion of go.dev/issue/66999). That could lead to weird
			// results if a slow critical section ends but another thread
			// quickly takes the lock, finishes its own critical section,
			// releases the lock, and then grabs the stack lock. That quick
			// thread would then take credit (blame) for the delay that this
			// slow thread caused. The alternative is to have more expensive
			// atomic operations (a CAS) on the critical path of unlock2.
			return
		}
		// Other Ms are waiting for the lock.
		// Obtain the stack lock, and pop off an M.
		next := v | mutexStackLocked
		if atomic.Casuintptr(&l.key, v, next) {
			break
		}
		v = atomic.Loaduintptr(&l.key)
	}

	// We own the mutexStackLocked flag. New Ms may push themselves onto the
	// stack concurrently, but we're now the only thread that can remove or
	// modify the Ms that are sleeping in the list.
useStackLock:

	if endTicks != 0 {
		// Find the M at the bottom of the stack of waiters, which has been
		// asleep for the longest. Take the average of its wait time and the
		// head M's wait time for the mutex contention profile, matching the
		// estimate we do in semrelease1 (for sync.Mutex contention).
		//
		// We don't keep track of the tail node (we don't need it often), so do
		// an O(N) walk on the list of sleeping Ms to find it.
		head := mutexWaitListHead(v).ptr()
		for node, n := head, 0; ; {
			n++
			next := node.mWaitList.next.ptr()
			if next == nil {
				cycles := ((endTicks - head.mWaitList.startTicks) + (endTicks - node.mWaitList.startTicks)) / 2
				node.mWaitList.startTicks = endTicks
				head.mWaitList.startTicks = endTicks
				getg().m.mLockProfile.recordUnlock(cycles * int64(n))
				break
			}
			node = next
		}
	}

	var committed *m // If we choose an M within the stack, we've made a promise to wake it
	for {
		headM := v &^ mutexMMask
		flags := v & (mutexMMask &^ mutexStackLocked) // preserve low bits, but release stack lock

		mp := mutexWaitListHead(v).ptr()
		wakem := committed
		if committed == nil {
			if v&mutexSpinning == 0 || mutexPreferLowLatency(l) {
				wakem = mp
			}
			if antiStarve {
				// Wake the M at the bottom of the stack of waiters. (This is
				// O(N) with the number of waiters.)
				wakem = mp
				prev := mp
				for {
					next := wakem.mWaitList.next.ptr()
					if next == nil {
						break
					}
					prev, wakem = wakem, next
				}
				if wakem != mp {
					committed = wakem
					prev.mWaitList.next = wakem.mWaitList.next
					// An M sets its own startTicks when it first goes to sleep.
					// When an unlock operation is sampled for the mutex
					// contention profile, it takes blame for the entire list of
					// waiting Ms but only updates the startTicks value at the
					// tail. Copy any updates to the next-oldest M.
					prev.mWaitList.startTicks = wakem.mWaitList.startTicks
				}
			}
		}

		if wakem == mp {
			headM = uintptr(mp.mWaitList.next) &^ mutexMMask
		}

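		// Release the stack lock and, if we've chosen the head as wakem, pop
		// it off the list, all in a single CAS. If the CAS fails, another M
		// pushed itself onto the list; reload the state word and try again.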
		next := headM | flags
		if atomic.Casuintptr(&l.key, v, next) {
			if wakem != nil {
				// Claimed an M. Wake it.
				semawakeup(wakem)
			}
			return
		}

		v = atomic.Loaduintptr(&l.key)
	}
}
   469  
