Source file
src/runtime/lfstack.go
// Lock-free stack.

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// lfstack is the head of a lock-free stack.
//
// The zero value of lfstack is an empty list.
//
// This stack is intrusive; nodes must embed lfnode as the first field.
//
// The stack does not keep GC-visible pointers to nodes, so the caller
// must ensure the nodes are allocated outside the Go heap (see
// lfnodeValidate).
type lfstack uint64

// push adds node to the top of the stack.
//
// The node's push count is packed into the new head word alongside its
// address, so a node that is popped and pushed again yields a different
// CAS value; this is the stack's defense against the ABA problem.
func (head *lfstack) push(node *lfnode) {
	node.pushcnt++
	new := lfstackPack(node, node.pushcnt)
	for {
		old := atomic.Load64((*uint64)(head))
		node.next = old
		if atomic.Cas64((*uint64)(head), old, new) {
			break
		}
	}
}

// pop removes and returns the node at the top of the stack, or returns
// nil if the stack is empty. The caller owns the returned node.
func (head *lfstack) pop() unsafe.Pointer {
	var backoff uint32
	// Backoff between failed CAS attempts is enabled only on arm64.
	if GOARCH == "arm64" {
		backoff = 128
	}
	for {
		old := atomic.Load64((*uint64)(head))
		if old == 0 {
			return nil
		}
		node := lfstackUnpack(old)
		next := atomic.Load64(&node.next)
		if atomic.Cas64((*uint64)(head), old, next) {
			return unsafe.Pointer(node)
		}

		// The CAS lost a race with a concurrent push or pop. When
		// backoff is enabled, spin briefly before retrying and grow
		// the pause by 1.5x per failure, easing pressure on the
		// contended head word. procyield spins for the given count,
		// so it is skipped entirely when backoff is zero.
		if backoff != 0 {
			procyield(backoff)
			backoff += backoff / 2
		}
	}
}

// empty reports whether the stack is empty. The answer may be stale by
// the time the caller acts on it, since other Ps can push concurrently.
func (head *lfstack) empty() bool {
	return atomic.Load64((*uint64)(head)) == 0
}

// lfnodeValidate checks that node is usable with lfstack: it must not be
// allocated from the Go heap (the stack stores pointers in a form the
// garbage collector cannot see), and its address must survive a round
// trip through the tagged-pointer packing.
func lfnodeValidate(node *lfnode) {
	if base, _, _ := findObject(uintptr(unsafe.Pointer(node)), 0, 0); base != 0 {
		throw("lfstack node allocated from the heap")
	}
	if lfstackUnpack(lfstackPack(node, ^uintptr(0))) != node {
		throw("bad lfnode address")
	}
}

// lfstackPack packs a node pointer and its push count into a single
// uint64 head word. Only the low tagBits bits of cnt are kept.
func lfstackPack(node *lfnode, cnt uintptr) uint64 {
	return uint64(taggedPointerPack(unsafe.Pointer(node), cnt&(1<<tagBits-1)))
}

// lfstackUnpack recovers the node pointer from a packed head word.
func lfstackUnpack(val uint64) *lfnode {
	return (*lfnode)(taggedPointer(val).pointer())
}
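
For intuition, here is a small standalone sketch (not part of this file) of the kind of pointer-plus-counter packing that lfstackPack and lfstackUnpack perform. The real layout lives in taggedPointerPack and depends on the architecture; this sketch simply assumes a 48-bit address in the high bits and a 16-bit count in the low bits, and treats everything as plain integers so no pointer is ever dereferenced:

package main

import "fmt"

// tagBits is the assumed width of the push counter in this sketch.
const tagBits = 16

// pack stores a 48-bit address in the high bits of a uint64 and the low
// tagBits bits of cnt in the low bits.
func pack(addr, cnt uint64) uint64 {
	return addr<<tagBits | cnt&(1<<tagBits-1)
}

// unpack recovers the address and the counter from a packed word.
func unpack(val uint64) (addr, cnt uint64) {
	return val >> tagBits, val & (1<<tagBits - 1)
}

func main() {
	packed := pack(0x00007fff12345678, 42)
	addr, cnt := unpack(packed)
	fmt.Printf("addr=%#x cnt=%d\n", addr, cnt) // addr=0x7fff12345678 cnt=42
}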
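
And a minimal, self-contained sketch (again, not part of this file) of the same Treiber-stack push/pop CAS loops written with sync/atomic instead of the runtime internals. It takes a different route around the ABA problem: every push allocates a fresh node and popped nodes are never pushed again, so the garbage collector, rather than a packed push counter, guarantees a stale head value cannot be reused under a concurrent popper:

package main

import (
	"fmt"
	"sync/atomic"
)

// node is an intrusive list element; next points further down the stack.
type node struct {
	value int
	next  *node
}

// stack is a lock-free LIFO built around a single atomic head pointer.
type stack struct {
	head atomic.Pointer[node]
}

// push allocates a fresh node for v and links it in front of the current
// head, retrying until the CAS wins. Fresh allocation (never reusing a
// popped node) is what rules out ABA here, instead of a push counter.
func (s *stack) push(v int) {
	n := &node{value: v}
	for {
		old := s.head.Load()
		n.next = old
		if s.head.CompareAndSwap(old, n) {
			return
		}
	}
}

// pop unlinks the current head and returns its value, or reports false
// if the stack is empty.
func (s *stack) pop() (int, bool) {
	for {
		old := s.head.Load()
		if old == nil {
			return 0, false
		}
		if s.head.CompareAndSwap(old, old.next) {
			return old.value, true
		}
	}
}

func main() {
	var s stack
	s.push(1)
	s.push(2)
	for {
		v, ok := s.pop()
		if !ok {
			break
		}
		fmt.Println(v) // prints 2, then 1
	}
}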