Source file src/runtime/lock_futex_tristate.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build (dragonfly || freebsd || linux) && !goexperiment.spinbitmutex
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/runtime/atomic"
    11  )
    12  
    13  // This implementation depends on OS-specific implementations of
    14  //
    15  //	futexsleep(addr *uint32, val uint32, ns int64)
    16  //		Atomically,
    17  //			if *addr == val { sleep }
    18  //		Might be woken up spuriously; that's allowed.
    19  //		Don't sleep longer than ns; ns < 0 means forever.
    20  //
    21  //	futexwakeup(addr *uint32, cnt uint32)
    22  //		If any procs are sleeping on addr, wake up at most cnt.
    23  
const (
	// Mutex state values held in l.key.
	mutex_unlocked = 0 // no holder
	mutex_locked   = 1 // held, no known sleepers
	mutex_sleeping = 2 // held, and presumably at least one thread sleeping in futexsleep

	// Spin tuning for lock2's contention loop.
	active_spin     = 4  // rounds of active (procyield) spinning, multiprocessor only
	active_spin_cnt = 30 // procyield iterations per active-spin round
	passive_spin    = 1  // rounds of osyield before giving up and sleeping
)
    33  
    34  // Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
    35  // mutex_sleeping means that there is presumably at least one sleeping thread.
    36  // Note that there can be spinning threads during all states - they do not
    37  // affect mutex's state.
    38  
// mWaitList is empty in the futex tristate implementation: waiters are
// tracked by the kernel futex queue, not by a runtime-side list.
// NOTE(review): presumably declared for parity with the spinbitmutex
// lock implementation selected by the build tag — confirm against that file.
type mWaitList struct{}

// lockVerifyMSize is a no-op here; presumably other lock implementations
// use it to check M-embedded wait-list layout — confirm against them.
func lockVerifyMSize() {}
    42  
    43  func mutexContended(l *mutex) bool {
    44  	return atomic.Load(key32(&l.key)) > mutex_locked
    45  }
    46  
// lock acquires l, routing through the lock-rank checking wrapper so that
// rank-order violations can be detected in checked builds.
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}
    50  
    51  func lock2(l *mutex) {
    52  	gp := getg()
    53  
    54  	if gp.m.locks < 0 {
    55  		throw("runtimeĀ·lock: lock count")
    56  	}
    57  	gp.m.locks++
    58  
    59  	// Speculative grab for lock.
    60  	v := atomic.Xchg(key32(&l.key), mutex_locked)
    61  	if v == mutex_unlocked {
    62  		return
    63  	}
    64  
    65  	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
    66  	// depending on whether there is a thread sleeping
    67  	// on this mutex. If we ever change l->key from
    68  	// MUTEX_SLEEPING to some other value, we must be
    69  	// careful to change it back to MUTEX_SLEEPING before
    70  	// returning, to ensure that the sleeping thread gets
    71  	// its wakeup call.
    72  	wait := v
    73  
    74  	timer := &lockTimer{lock: l}
    75  	timer.begin()
    76  	// On uniprocessors, no point spinning.
    77  	// On multiprocessors, spin for ACTIVE_SPIN attempts.
    78  	spin := 0
    79  	if ncpu > 1 {
    80  		spin = active_spin
    81  	}
    82  	for {
    83  		// Try for lock, spinning.
    84  		for i := 0; i < spin; i++ {
    85  			for l.key == mutex_unlocked {
    86  				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
    87  					timer.end()
    88  					return
    89  				}
    90  			}
    91  			procyield(active_spin_cnt)
    92  		}
    93  
    94  		// Try for lock, rescheduling.
    95  		for i := 0; i < passive_spin; i++ {
    96  			for l.key == mutex_unlocked {
    97  				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
    98  					timer.end()
    99  					return
   100  				}
   101  			}
   102  			osyield()
   103  		}
   104  
   105  		// Sleep.
   106  		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
   107  		if v == mutex_unlocked {
   108  			timer.end()
   109  			return
   110  		}
   111  		wait = mutex_sleeping
   112  		futexsleep(key32(&l.key), mutex_sleeping, -1)
   113  	}
   114  }
   115  
// unlock releases l, routing through the lock-rank checking wrapper
// (the counterpart of lock).
func unlock(l *mutex) {
	unlockWithRank(l)
}
   119  
   120  func unlock2(l *mutex) {
   121  	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
   122  	if v == mutex_unlocked {
   123  		throw("unlock of unlocked lock")
   124  	}
   125  	if v == mutex_sleeping {
   126  		futexwakeup(key32(&l.key), 1)
   127  	}
   128  
   129  	gp := getg()
   130  	gp.m.mLockProfile.recordUnlock(l)
   131  	gp.m.locks--
   132  	if gp.m.locks < 0 {
   133  		throw("runtimeĀ·unlock: lock count")
   134  	}
   135  	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
   136  		gp.stackguard0 = stackPreempt
   137  	}
   138  }
   139  

View as plain text