Source file src/runtime/lock_sema_tristate.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (aix || darwin || netbsd || openbsd || plan9 || solaris || windows) && !goexperiment.spinbitmutex

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
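// Spin strategy for contended locks: on a multiprocessor, spin actively
// (procyield) for active_spin attempts of active_spin_cnt cycles each, then
// yield the OS thread (osyield) for passive_spin attempts, and only then
// queue the M on the wait list and sleep on its semaphore.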
const (
	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// mWaitList is part of the M struct, and holds the list of Ms that are waiting
// for a particular runtime.mutex.
//
// When an M is unable to immediately obtain a lock, it adds itself to the list
// of Ms waiting for the lock. It does that via this struct's next field,
// forming a singly-linked list with the mutex's key field pointing to the head
// of the list.
type mWaitList struct {
	next muintptr // next m waiting for lock
}

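// lockVerifyMSize is a no-op for this semaphore-based lock implementation.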
func lockVerifyMSize() {}

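// mutexContended reports whether other Ms are queued waiting for l: any key
// value greater than locked also carries a pointer to the head of the wait
// list.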
func mutexContended(l *mutex) bool {
	return atomic.Loaduintptr(&l.key) > locked
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
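	// The speculative grab failed, so this M may need to block; make sure it
	// has a semaphore to sleep on before entering the contended path.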
	semacreate(gp.m)

	timer := &lockTimer{lock: l}
	timer.begin()
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
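	// i counts attempts in the current spin phase; it resets to 0 whenever
	// the lock is observed unlocked (but the acquire CAS loses a race) and
	// after this M is woken from semasleep, so each new attempt starts with
	// active spinning again.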
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				timer.end()
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key points to a linked list of M's waiting
			// for this lock, chained through m.mWaitList.next.
			// Queue this M.
			for {
				gp.m.mWaitList.next = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
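			// Successfully pushed this M onto the wait list. If the lock was
			// still held at that point, sleep until unlock2 dequeues this M
			// and wakes it with semawakeup.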
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

// We might not be holding a p in this code.
//
//go:nowritebarrier
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
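	// Either clear the key outright (no waiters), or pop the M at the head of
	// the wait list, drop the locked bit, and wake that M so it can retry the
	// acquire in lock2.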
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.mWaitList.next)) {
				// Dequeued an M.  Wake it.
				semawakeup(mp) // no use of mp after this point; it's awake
				break
			}
		}
	}
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
