Source file src/runtime/mcache.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
type mcache struct {
	_ sys.NotInHeap

	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample  int64   // trigger heap sample after allocating this many bytes
	memProfRate int     // cached mem profile rate, used to detect changes
	scanAlloc   uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen atomic.Uint32
}
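
// Editor's note: the tiny, tinyoffset, and tinyAllocs fields above back the
// tiny allocator's bump pointer. As a simplified sketch (the real logic lives
// in mallocgc in malloc.go and also aligns off to the object's size), carving
// an object of the requested size out of the current 16-byte tiny block looks
// roughly like:
//
//	off := c.tinyoffset
//	if c.tiny != 0 && off+size <= maxTinySize {
//		x := unsafe.Pointer(c.tiny + off) // object starts off bytes into the block
//		c.tinyoffset = off + size         // bump the offset past the new object
//		c.tinyAllocs++                    // counted per owning P, flushed in refill/releaseAll
//		return x
//	}
//	// Otherwise malloc.go starts a fresh tiny block and resets tiny/tinyoffset.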

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}
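
// Editor's note: because the list is linked through gclinkptr values, pushing
// a free stack onto a stackfreelist needs no write barriers. A sketch of the
// push pattern (the real code is in stackfree/stackcacherefill in stack.go;
// here x is a gclinkptr to a free stack of the given order and n is its size
// in bytes):
//
//	x.ptr().next = c.stackcache[order].list
//	c.stackcache[order].list = x
//	c.stackcache[order].size += n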

// dummy mspan that contains no free objects.
var emptymspan mspan

func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen.Store(mheap_.sweepgen)
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}

// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// In some cases there is no way to simply release
// resources, such as statistics, so donate them to
// a different mcache (the recipient).
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if we're not bootstrapping and we don't have a P. The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache {
	// Grab the mcache, since that's where stats live.
	pp := mp.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}
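
// Editor's sketch of the expected calling pattern (not code from this file):
// the caller pins itself to its M, and hence its P, so that the returned
// mcache cannot be handed to another P mid-use.
//
//	mp := acquirem() // disable preemption; the P cannot change
//	c := getMCache(mp)
//	if c == nil {
//		// No P and bootstrapping is over; the caller must take a slow path.
//	}
//	// ... use c ...
//	releasem(mp)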

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if s.allocCount != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)

		// Count up how many slots were used and record it.
		stats := memstats.heapStats.acquire()
		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)

		// Flush tinyAllocs.
		if spc == tinySpanClass {
			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
			c.tinyAllocs = 0
		}
		memstats.heapStats.release()

		// Count the allocs in inconsistent, internal stats.
		bytesAllocated := slotsUsed * int64(s.elemsize)
		gcController.totalAlloc.Add(bytesAllocated)

		// Clear the second allocCount just to be safe.
		s.allocCountBeforeCache = 0
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if s.allocCount == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Store the current alloc count for accounting later.
	s.allocCountBeforeCache = s.allocCount

	// Update heapLive and flush scanAlloc.
	//
	// We have not yet allocated anything new into the span, but we
	// assume that all of its slots will get used, so this makes
	// heapLive an overestimate.
	//
	// When the span gets uncached, we'll fix up this overestimate
	// if necessary (see releaseAll).
	//
	// We pick an overestimate here because an underestimate leads
	// the pacer to believe that it's in better shape than it is,
	// which appears to lead to more memory used. See #53738 for
	// more details.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
	c.scanAlloc = 0

	c.alloc[spc] = s
}
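
// Editor's worked example of the accounting above, using illustrative
// numbers (8 KiB pages, a 64-byte size class, so nelems = 128): if the span
// arrives from cacheSpan with allocCount = 40, refill credits heapLive with
// 8192 - 40*64 = 5632 bytes, as though every remaining slot will be used.
// If the P later uncaches the span (see releaseAll) having used only 50
// slots in total, then slotsUsed = 50 - 40 = 10, so totalAlloc grows by
// 10*64 = 640 bytes, and releaseAll subtracts the unused
// (128-50)*64 = 4992 bytes from heapLive to undo the rest of the overestimate.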

// allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
	if size+pageSize < size {
		throw("out of memory")
	}
	npages := size >> gc.PageShift
	if size&pageMask != 0 {
		npages++
	}
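
	// Editor's note, with illustrative numbers: on a platform with 8 KiB
	// pages (gc.PageShift = 13, pageMask = 8191), a hypothetical size of
	// 100000 bytes gives npages = 100000>>13 = 12, and because
	// 100000&8191 != 0 it is rounded up to 13 pages (106496 bytes).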

	// Deduct credit for this span allocation and sweep if
	// necessary. mheap_.alloc will also sweep npages pages, so this
	// only pays the sweep debt down to npages pages.
	deductSweepCredit(npages*pageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc)
	if s == nil {
		throw("out of memory")
	}

	// Count the alloc in consistent, external stats.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
	atomic.Xadd64(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Count the alloc in inconsistent, internal stats.
	gcController.totalAlloc.Add(int64(npages * pageSize))

	// Update heapLive.
	gcController.update(int64(s.npages*pageSize), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)

	// Adjust s.limit down to the object-containing part of the span.
	//
	// This is just to create a slightly tighter bound on the limit.
	// It's totally OK if the garbage collector, in particular
	// conservative scanning, temporarily observes an inflated
	// limit. It will simply mark the whole object or just skip it
	// since we're in the mark phase anyway.
	s.limit = s.base() + size
	s.initHeapBits()
	return s
}

func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
			s.allocCountBeforeCache = 0

			// Adjust smallAllocCount for whatever was allocated.
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
			memstats.heapStats.release()

			// Adjust the actual allocs in inconsistent, internal stats.
			// We assumed earlier that the full span gets allocated.
			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))

			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in gcController.heapLive.
				// Undo this.
				//
				// If this span was cached before sweep, then gcController.heapLive was totally
				// recomputed since caching this span, so we don't do this for stale spans.
				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize)
			}

			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Update heapLive and heapScan.
	gcController.update(dHeapLive, scanAlloc)
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	flushGen := c.flushGen.Load()
	if flushGen == sg {
		return
	} else if flushGen != sg-2 {
		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
}
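
// Editor's note on the flushGen check above: mheap_.sweepgen advances by two
// at the start of each sweep cycle, so flushGen == sg means the cache was
// already flushed this cycle, and flushGen == sg-2 means it was last flushed
// exactly one cycle ago and must be flushed now. As a worked example, a cache
// flushed when sweepgen was 4 is fine as long as sweepgen is still 4, gets
// flushed here once sweepgen reaches 6, and indicates a missed flush (hence
// the throw) if sweepgen has already reached 8.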