Source file src/runtime/lfstack.go

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lock-free stack.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/runtime/atomic"
    11  	"unsafe"
    12  )
    13  
// lfstack is the head of a lock-free stack.
//
// The zero value of lfstack is an empty list.
//
// This stack is intrusive. Nodes must embed lfnode as the first field.
//
// The stack does not keep GC-visible pointers to nodes, so the caller
// must ensure the nodes are allocated outside the Go heap.
//
// The head word is a tagged pointer: the node address packed together
// with a push-count tag (see lfstackPack), which guards the CAS loops
// in push/pop against the ABA problem.
type lfstack uint64
    23  
// push adds node to the top of the stack.
//
// The per-node pushcnt is incremented on every push and mixed into the
// packed head word, so a CAS against a stale head value cannot succeed
// even if the same node address reappears (ABA protection).
func (head *lfstack) push(node *lfnode) {
	node.pushcnt++
	new := lfstackPack(node, node.pushcnt)
	for {
		old := atomic.Load64((*uint64)(head))
		// Link the node to the current head before publishing it;
		// a successful CAS makes both the link and the node visible.
		node.next = old
		if atomic.Cas64((*uint64)(head), old, new) {
			break
		}
	}
}
    35  
// pop removes and returns the node at the top of the stack, or nil if
// the stack is empty.
//
// The returned pointer is the node itself (which embeds lfnode as its
// first field).
func (head *lfstack) pop() unsafe.Pointer {
	var backoff uint32
	// TODO: tweak backoff parameters on other architectures.
	// On architectures other than arm64, backoff stays 0, so the
	// procyield below is effectively a no-op.
	if GOARCH == "arm64" {
		backoff = 128
	}
	for {
		old := atomic.Load64((*uint64)(head))
		if old == 0 {
			return nil
		}
		node := lfstackUnpack(old)
		next := atomic.Load64(&node.next)
		// The CAS only succeeds if the head (pointer + push-count tag)
		// is unchanged since the load above, so next is still the
		// correct new head.
		if atomic.Cas64((*uint64)(head), old, next) {
			return unsafe.Pointer(node)
		}

		// Back off after a failed CAS: easing demand on the shared
		// head word reduces memory contention and lets other threads
		// make quicker progress.
		// Read more in this Arm blog post:
		// https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/multi-threaded-applications-arm
		procyield(backoff)
		// Increase backoff time.
		backoff += backoff / 2

	}
}
    64  
    65  func (head *lfstack) empty() bool {
    66  	return atomic.Load64((*uint64)(head)) == 0
    67  }
    68  
    69  // lfnodeValidate panics if node is not a valid address for use with
    70  // lfstack.push. This only needs to be called when node is allocated.
    71  func lfnodeValidate(node *lfnode) {
    72  	if base, _, _ := findObject(uintptr(unsafe.Pointer(node)), 0, 0); base != 0 {
    73  		throw("lfstack node allocated from the heap")
    74  	}
    75  	lfstackPack(node, ^uintptr(0))
    76  }
    77  
    78  func lfstackPack(node *lfnode, cnt uintptr) uint64 {
    79  	return uint64(taggedPointerPack(unsafe.Pointer(node), cnt&(1<<tagBits-1)))
    80  }
    81  
    82  func lfstackUnpack(val uint64) *lfnode {
    83  	return (*lfnode)(taggedPointer(val).pointer())
    84  }
    85  

View as plain text