Source file src/runtime/mcentral.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Central free lists.
     6  //
     7  // See malloc.go for an overview.
     8  //
     9  // The mcentral doesn't actually contain the list of free objects; the mspan does.
    10  // Each mcentral is two lists of mspans: those with free objects (c->nonempty)
    11  // and those that are completely allocated (c->empty).
    12  
    13  package runtime
    14  
    15  import (
    16  	"internal/runtime/atomic"
    17  	"internal/runtime/gc"
    18  	"internal/runtime/sys"
    19  )
    20  
// Central list of free objects of a given size.
//
// There is one mcentral per span class; it mediates spans between the
// per-P mcaches and the heap. See the package comment above: the free
// objects themselves live in the mspans, not here.
type mcentral struct {
	_         sys.NotInHeap // marker: mcentral must not be heap-allocated
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}
    47  
    48  // Initialize a single central free list.
    49  func (c *mcentral) init(spc spanClass) {
    50  	c.spanclass = spc
    51  	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
    52  	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
    53  	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
    54  	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
    55  }
    56  
    57  // partialUnswept returns the spanSet which holds partially-filled
    58  // unswept spans for this sweepgen.
    59  func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
    60  	return &c.partial[1-sweepgen/2%2]
    61  }
    62  
    63  // partialSwept returns the spanSet which holds partially-filled
    64  // swept spans for this sweepgen.
    65  func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
    66  	return &c.partial[sweepgen/2%2]
    67  }
    68  
    69  // fullUnswept returns the spanSet which holds unswept spans without any
    70  // free slots for this sweepgen.
    71  func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
    72  	return &c.full[1-sweepgen/2%2]
    73  }
    74  
    75  // fullSwept returns the spanSet which holds swept spans without any
    76  // free slots for this sweepgen.
    77  func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
    78  	return &c.full[sweepgen/2%2]
    79  }
    80  
// Allocate a span to use in an mcache.
//
// cacheSpan looks for a span with free space in order of increasing
// cost: an already-swept partial span, then unswept partial spans
// (sweeping each), then unswept full spans (sweeping may free slots),
// and finally a fresh span from the heap via c.grow. It returns nil
// only if growing the heap fails. On success, the returned span has at
// least one free slot and its allocCache is positioned so that the low
// bit corresponds to s.freeindex.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) * pageSize
	deductSweepCredit(spanBytes, 0)

	// Bracket the sweeping work with GCSweepStart/GCSweepDone trace
	// events. traceDone records whether GCSweepDone was already emitted
	// on the no-sweep fast path below, so the havespan epilogue doesn't
	// emit it twice.
	traceDone := false
	trace := traceAcquire()
	if trace.ok() {
		trace.GCSweepStart()
		traceRelease(trace)
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	// s and sl are declared up front because the gotos below jump over
	// any later declarations.
	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Register as an active sweeper for the unswept lists. If sl is
	// invalid, sweeping for this cycle is already done and both unswept
	// sets are (or are being) drained, so skip straight to growing.
	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			// Note: this s shadows the outer s with the locked form
			// returned by tryAcquire.
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	// Sweeping found nothing usable; close the sweep trace region before
	// falling back to the heap.
	trace = traceAcquire()
	if trace.ok() {
		trace.GCSweepDone()
		traceDone = true
		traceRelease(trace)
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	// Close the sweep trace region if the fall-through path above didn't.
	if !traceDone {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCSweepDone()
			traceRelease(trace)
		}
	}
	// Sanity-check that the span really has a free slot before handing it
	// to an mcache; each condition is a different way of being "full".
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || s.allocCount == s.nelems {
		throw("span has no free objects")
	}
	// Round freeindex down to the start of its 64-bit allocBits word.
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
   200  
// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
//
// If s was cached before the current sweep cycle began (stale), this
// sweeps it directly; otherwise s is pushed onto the partial or full
// swept set depending on whether it still has free slots.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	// sweepgen == sg+1 is the "cached before this sweep cycle" state, so
	// the span still needs sweeping and we are responsible for it.
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place. The sweepgen store above
	// must happen before the span becomes reachable from a list again.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
   249  
   250  // grow allocates a new empty span from the heap and initializes it for c's size class.
   251  func (c *mcentral) grow() *mspan {
   252  	npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
   253  	s := mheap_.alloc(npages, c.spanclass)
   254  	if s == nil {
   255  		return nil
   256  	}
   257  	s.initHeapBits()
   258  	return s
   259  }
   260  

View as plain text