Source file src/sync/pool.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"internal/race"
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A Pool is a set of temporary objects that may be individually saved and
// retrieved.
//
// Any item stored in the Pool may be removed automatically at any time without
// notification. If the Pool holds the only reference when this happens, the
// item might be deallocated.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
//
// An appropriate use of a Pool is to manage a group of temporary items
// silently shared among and potentially reused by concurrent independent
// clients of a package. Pool provides a way to amortize allocation overhead
// across many clients.
//
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
//
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
//
// A Pool must not be copied after first use.
//
// In the terminology of the Go memory model, a call to Put(x) “synchronizes before”
// a call to [Pool.Get] returning that same value x.
// Similarly, a call to New returning x “synchronizes before”
// a call to Get returning that same value x.
type Pool struct {
	noCopy noCopy

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	victim     unsafe.Pointer // local from previous cycle
	victimSize uintptr        // size of victims array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() any
}
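// A minimal usage sketch (assumed, not part of this file): the canonical
// pattern for the API above, using a hypothetical bufPool that recycles
// bytes.Buffers, much like the store the fmt package maintains internally:
//
//	var bufPool = sync.Pool{
//		New: func() any {
//			// Called by Get only when the pool has nothing to hand out.
//			return new(bytes.Buffer)
//		},
//	}
//
//	func render(data []byte) string {
//		buf := bufPool.Get().(*bytes.Buffer)
//		defer bufPool.Put(buf)
//		buf.Reset() // Get may return a used buffer; clear stale state first.
//		buf.Write(data)
//		return buf.String()
//	}
//
// Note the Reset on Get rather than before Put: either works, but resetting
// on Get keeps the invariant local to the code that relies on it.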
// Local per-P Pool appendix.
type poolLocalInternal struct {
	private any       // Can be used only by the respective P.
	shared  poolChain // Local P can pushHead/popHead; any P can popTail.
}

type poolLocal struct {
	poolLocalInternal

	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0.
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}

// from runtime
//
//go:linkname runtime_randn runtime.randn
func runtime_randn(n uint32) uint32

var poolRaceHash [128]uint64

// poolRaceAddr returns an address to use as the synchronization point
// for race detector logic. We don't use the actual pointer stored in x
// directly, for fear of conflicting with other synchronization on that address.
// Instead, we hash the pointer to get an index into poolRaceHash.
// See discussion on golang.org/cl/31589.
func poolRaceAddr(x any) unsafe.Pointer {
	ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
	h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
	return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
}

// Put adds x to the pool.
func (p *Pool) Put(x any) {
	if x == nil {
		return
	}
	if race.Enabled {
		if runtime_randn(4) == 0 {
			// Randomly drop x on floor.
			return
		}
		race.ReleaseMerge(poolRaceAddr(x))
		race.Disable()
	}
	l, _ := p.pin()
	if l.private == nil {
		l.private = x
	} else {
		l.shared.pushHead(x)
	}
	runtime_procUnpin()
	if race.Enabled {
		race.Enable()
	}
}

// Get selects an arbitrary item from the [Pool], removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to [Pool.Put] and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() any {
	if race.Enabled {
		race.Disable()
	}
	l, pid := p.pin()
	x := l.private
	l.private = nil
	if x == nil {
		// Try to pop the head of the local shard. We prefer
		// the head over the tail for temporal locality of
		// reuse.
		x, _ = l.shared.popHead()
		if x == nil {
			x = p.getSlow(pid)
		}
	}
	runtime_procUnpin()
	if race.Enabled {
		race.Enable()
		if x != nil {
			race.Acquire(poolRaceAddr(x))
		}
	}
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}

func (p *Pool) getSlow(pid int) any {
	// See the comment in pin regarding ordering of the loads.
	size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
	locals := p.local                            // load-consume
	// Try to steal one element from other procs.
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i+1)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Try the victim cache. We do this after attempting to steal
	// from all primary caches because we want objects in the
	// victim cache to age out if at all possible.
	size = atomic.LoadUintptr(&p.victimSize)
	if uintptr(pid) >= size {
		return nil
	}
	locals = p.victim
	l := indexLocal(locals, pid)
	if x := l.private; x != nil {
		l.private = nil
		return x
	}
	for i := 0; i < int(size); i++ {
		l := indexLocal(locals, (pid+i)%int(size))
		if x, _ := l.shared.popTail(); x != nil {
			return x
		}
	}

	// Mark the victim cache as empty so future gets don't bother
	// with it.
	atomic.StoreUintptr(&p.victimSize, 0)

	return nil
}
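// A related caveat, sketched with a hypothetical slicePool (an assumption,
// not code from this file): Put and Get traffic in any, so storing a bare
// []byte boxes the slice header into an interface on every Put, allocating
// the very thing the pool is meant to avoid. The usual workaround is to pool
// a pointer to the slice:
//
//	var slicePool = sync.Pool{
//		New: func() any {
//			s := make([]byte, 0, 1024)
//			return &s // pointers fit in an interface without allocating
//		},
//	}
//
//	func getBuf() *[]byte { return slicePool.Get().(*[]byte) }
//
//	func putBuf(s *[]byte) {
//		*s = (*s)[:0] // truncate, keeping capacity for the next user
//		slicePool.Put(s)
//	}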
// pin pins the current goroutine to P, disables preemption and
// returns the poolLocal pool for the P and the P's id.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() (*poolLocal, int) {
	// Check whether p is nil to get a panic.
	// Otherwise the nil dereference happens while the m is pinned,
	// causing a fatal error rather than a panic.
	if p == nil {
		panic("nil Pool")
	}

	pid := runtime_procPin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local, which is fine (we must observe its zero-initialized-ness).
	s := runtime_LoadAcquintptr(&p.localSize) // load-acquire
	l := p.local                              // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	return p.pinSlow()
}

func (p *Pool) pinSlow() (*poolLocal, int) {
	// Retry under the mutex.
	// Cannot lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid), pid
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	runtime_StoreReluintptr(&p.localSize, uintptr(size))     // store-release
	return &local[pid], pid
}

func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.

	// Because the world is stopped, no pool user can be in a
	// pinned section (in effect, this has all Ps pinned).

	// Drop victim caches from all pools.
	for _, p := range oldPools {
		p.victim = nil
		p.victimSize = 0
	}

	// Move primary cache to victim cache.
	for _, p := range allPools {
		p.victim = p.local
		p.victimSize = p.localSize
		p.local = nil
		p.localSize = 0
	}

	// The pools with non-empty primary caches now have non-empty
	// victim caches and no pools have primary caches.
	oldPools, allPools = allPools, nil
}

var (
	allPoolsMu Mutex

	// allPools is the set of pools that have non-empty primary
	// caches. Protected by either 1) allPoolsMu and pinning or 2)
	// STW.
	allPools []*Pool

	// oldPools is the set of pools that may have non-empty victim
	// caches. Protected by STW.
	oldPools []*Pool
)

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
	return (*poolLocal)(lp)
}

// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()

// The below are implemented in internal/runtime/atomic and the
// compiler also knows to intrinsify the symbol we linkname into this
// package.

//go:linkname runtime_LoadAcquintptr internal/runtime/atomic.LoadAcquintptr
func runtime_LoadAcquintptr(ptr *uintptr) uintptr

//go:linkname runtime_StoreReluintptr internal/runtime/atomic.StoreReluintptr
func runtime_StoreReluintptr(ptr *uintptr, val uintptr) uintptr
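// A minimal sketch of the cleanup lifecycle above, assuming a quiet program
// whose goroutine stays on a single P (on a busy machine the value may land
// in another P's shard and the exact outcome can differ):
//
//	var p sync.Pool
//	p.Put("x")
//	runtime.GC()         // poolCleanup: the primary cache becomes the victim cache
//	fmt.Println(p.Get()) // typically "x": getSlow falls back to the victim cache
//
//	p.Put("y")
//	runtime.GC() // "y" moves to the victim cache...
//	runtime.GC() // ...and the second cleanup drops it
//	fmt.Println(p.Get()) // <nil>: an idle value survives at most two GCs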