Source file src/runtime/mgcsweep.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: sweeping
     6  
     7  // The sweeper consists of two different algorithms:
     8  //
     9  // * The object reclaimer finds and frees unmarked slots in spans. It
    10  //   can free a whole span if none of the objects are marked, but that
    11  //   isn't its goal. This can be driven either synchronously by
    12  //   mcentral.cacheSpan for mcentral spans, or asynchronously by
    13  //   sweepone, which looks at all the mcentral lists.
    14  //
    15  // * The span reclaimer looks for spans that contain no marked objects
    16  //   and frees whole spans. This is a separate algorithm because
    17  //   freeing whole spans is the hardest task for the object reclaimer,
    18  //   but is critical when allocating new spans. The entry point for
    19  //   this is mheap_.reclaim and it's driven by a sequential scan of
    20  //   the page marks bitmap in the heap arenas.
    21  //
    22  // Both algorithms ultimately call mspan.sweep, which sweeps a single
    23  // heap span.
    24  
    25  package runtime
    26  
    27  import (
    28  	"internal/runtime/atomic"
    29  	"unsafe"
    30  )
    31  
    32  var sweep sweepdata
    33  
    34  // State of background sweep.
    35  type sweepdata struct {
    36  	lock   mutex
    37  	g      *g
    38  	parked bool
    39  
    40  	// active tracks outstanding sweepers and the sweep
    41  	// termination condition.
    42  	active activeSweep
    43  
    44  	// centralIndex is the current unswept span class.
    45  	// It represents an index into the mcentral span
    46  	// sets. Accessed and updated via its load and
    47  	// update methods. Not protected by a lock.
    48  	//
    49  	// Reset at mark termination.
    50  	// Used by mheap.nextSpanForSweep.
    51  	centralIndex sweepClass
    52  }
    53  
    54  // sweepClass is a spanClass and one bit to represent whether we're currently
    55  // sweeping partial or full spans.
    56  type sweepClass uint32
    57  
    58  const (
    59  	numSweepClasses            = numSpanClasses * 2
    60  	sweepClassDone  sweepClass = sweepClass(^uint32(0))
    61  )
    62  
    63  func (s *sweepClass) load() sweepClass {
    64  	return sweepClass(atomic.Load((*uint32)(s)))
    65  }
    66  
    67  func (s *sweepClass) update(sNew sweepClass) {
    68  	// Only update *s if its current value is less than sNew,
    69  	// since *s increases monotonically.
    70  	sOld := s.load()
    71  	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
    72  		sOld = s.load()
    73  	}
    74  	// TODO(mknyszek): This isn't the only place we have
    75  	// an atomic monotonically increasing counter. It would
    76  	// be nice to have an "atomic max" which is just implemented
    77  	// as the above on most architectures. Some architectures
    78  	// like RISC-V however have native support for an atomic max.
    79  }
    80  
    81  func (s *sweepClass) clear() {
    82  	atomic.Store((*uint32)(s), 0)
    83  }
    84  
    85  // split returns the underlying span class as well as
    86  // whether we're interested in the full or partial
    87  // unswept lists for that class, indicated as a boolean
    88  // (true means "full").
    89  func (s sweepClass) split() (spc spanClass, full bool) {
    90  	return spanClass(s >> 1), s&1 == 0
    91  }
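
// For example, with the encoding above, the even value 2*spc refers to the
// full-unswept set for span class spc and the odd value 2*spc+1 refers to
// the partial-unswept set for the same class, so walking sweepClass values
// in increasing order visits the full and then the partial list of each
// span class in turn.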
    92  
    93  // nextSpanForSweep finds and pops the next span for sweeping from the
    94  // central sweep buffers. It returns ownership of the span to the caller.
    95  // Returns nil if no such span exists.
    96  func (h *mheap) nextSpanForSweep() *mspan {
    97  	sg := h.sweepgen
    98  	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
    99  		spc, full := sc.split()
   100  		c := &h.central[spc].mcentral
   101  		var s *mspan
   102  		if full {
   103  			s = c.fullUnswept(sg).pop()
   104  		} else {
   105  			s = c.partialUnswept(sg).pop()
   106  		}
   107  		if s != nil {
   108  			// Write down that we found something so future sweepers
   109  			// can start from here.
   110  			sweep.centralIndex.update(sc)
   111  			return s
   112  		}
   113  	}
   114  	// Write down that we found nothing.
   115  	sweep.centralIndex.update(sweepClassDone)
   116  	return nil
   117  }
   118  
   119  const sweepDrainedMask = 1 << 31
   120  
   121  // activeSweep is a type that captures whether sweeping
   122  // is done, and whether there are any outstanding sweepers.
   123  //
   124  // Every potential sweeper must call begin() before they look
   125  // for work, and end() after they've finished sweeping.
   126  type activeSweep struct {
   127  	// state is divided into two parts.
   128  	//
   129  	// The top bit (masked by sweepDrainedMask) is a boolean
   130  	// value indicating whether all the sweep work has been
   131  	// drained from the queue.
   132  	//
   133  	// The rest of the bits are a counter, indicating the
   134  	// number of outstanding concurrent sweepers.
   135  	state atomic.Uint32
   136  }
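
// For illustration, with sweepDrainedMask = 1<<31 the state word decodes
// like this:
//
//	0x00000000: not drained, no outstanding sweepers
//	0x00000002: not drained, two outstanding sweepers
//	0x80000000: drained with no sweepers left, i.e. sweeping is done (isDone)
//	0x80000001: drained, but one sweeper is still finishing its span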
   137  
   138  // begin registers a new sweeper. Returns a sweepLocker
   139  // for acquiring spans for sweeping. Any outstanding sweeper blocks
   140  // sweep termination.
   141  //
   142  // If the sweepLocker is invalid, the caller can be sure that all
   143  // outstanding sweep work has been drained, so there is nothing left
   144  // to sweep. Note that there may be sweepers currently running, so
   145  // this does not indicate that all sweeping has completed.
   146  //
   147  // Even if the sweepLocker is invalid, its sweepGen is always valid.
   148  func (a *activeSweep) begin() sweepLocker {
   149  	for {
   150  		state := a.state.Load()
   151  		if state&sweepDrainedMask != 0 {
   152  			return sweepLocker{mheap_.sweepgen, false}
   153  		}
   154  		if a.state.CompareAndSwap(state, state+1) {
   155  			return sweepLocker{mheap_.sweepgen, true}
   156  		}
   157  	}
   158  }
   159  
   160  // end deregisters a sweeper. Must be called once for each time
   161  // begin is called if the sweepLocker is valid.
   162  func (a *activeSweep) end(sl sweepLocker) {
   163  	if sl.sweepGen != mheap_.sweepgen {
   164  		throw("sweeper left outstanding across sweep generations")
   165  	}
   166  	for {
   167  		state := a.state.Load()
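		// Sanity check: if the sweeper count (state with the drained bit
		// masked off) is already zero, the subtraction below wraps around to
		// a value >= sweepDrainedMask, catching an end() without a matching
		// begin().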
   168  		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
   169  			throw("mismatched begin/end of activeSweep")
   170  		}
   171  		if a.state.CompareAndSwap(state, state-1) {
   172  			if state-1 != sweepDrainedMask {
   173  				return
   174  			}
   175  			// We're the last sweeper, and there's nothing left to sweep.
   176  			if debug.gcpacertrace > 0 {
   177  				live := gcController.heapLive.Load()
   178  				print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
   179  			}
   180  			// Now that sweeping is completely done, flush remaining cleanups.
   181  			gcCleanups.flush()
   182  			return
   183  		}
   184  	}
   185  }
   186  
   187  // markDrained marks the active sweep cycle as having drained
   188  // all remaining work. This is safe to be called concurrently
   189  // with all other methods of activeSweep, though may race.
   190  //
   191  // Returns true if this call was the one that actually performed
   192  // the mark.
   193  func (a *activeSweep) markDrained() bool {
   194  	for {
   195  		state := a.state.Load()
   196  		if state&sweepDrainedMask != 0 {
   197  			return false
   198  		}
   199  		if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
   200  			return true
   201  		}
   202  	}
   203  }
   204  
   205  // sweepers returns the current number of active sweepers.
   206  func (a *activeSweep) sweepers() uint32 {
   207  	return a.state.Load() &^ sweepDrainedMask
   208  }
   209  
   210  // isDone returns true if all sweep work has been drained and no more
   211  // outstanding sweepers exist. That is, when the sweep phase is
   212  // completely done.
   213  func (a *activeSweep) isDone() bool {
   214  	return a.state.Load() == sweepDrainedMask
   215  }
   216  
   217  // reset sets up the activeSweep for the next sweep cycle.
   218  //
   219  // The world must be stopped.
   220  func (a *activeSweep) reset() {
   221  	assertWorldStopped()
   222  	a.state.Store(0)
   223  }
   224  
   225  // finishsweep_m ensures that all spans are swept.
   226  //
   227  // The world must be stopped. This ensures there are no sweeps in
   228  // progress.
   229  //
   230  //go:nowritebarrier
   231  func finishsweep_m() {
   232  	assertWorldStopped()
   233  
   234  	// Sweeping must be complete before marking commences, so
   235  	// sweep any unswept spans. If this is a concurrent GC, there
   236  	// shouldn't be any spans left to sweep, so this should finish
   237  	// instantly. If GC was forced before the concurrent sweep
   238  	// finished, there may be spans to sweep.
   239  	for sweepone() != ^uintptr(0) {
   240  	}
   241  
   242  	// Make sure there aren't any outstanding sweepers left.
   243  	// At this point, with the world stopped, it means one of two
    244  	// things. Either we were able to preempt a sweeper, or a
    245  	// sweeper didn't call sweep.active.end when it should have.
   246  	// Both cases indicate a bug, so throw.
   247  	if sweep.active.sweepers() != 0 {
   248  		throw("active sweepers found at start of mark phase")
   249  	}
   250  
   251  	// Reset all the unswept buffers, which should be empty.
   252  	// Do this in sweep termination as opposed to mark termination
   253  	// so that we can catch unswept spans and reclaim blocks as
   254  	// soon as possible.
   255  	sg := mheap_.sweepgen
   256  	for i := range mheap_.central {
   257  		c := &mheap_.central[i].mcentral
   258  		c.partialUnswept(sg).reset()
   259  		c.fullUnswept(sg).reset()
   260  	}
   261  
   262  	// Sweeping is done, so there won't be any new memory to
   263  	// scavenge for a bit.
   264  	//
   265  	// If the scavenger isn't already awake, wake it up. There's
   266  	// definitely work for it to do at this point.
   267  	scavenger.wake()
   268  
   269  	nextMarkBitArenaEpoch()
   270  }
   271  
   272  func bgsweep(c chan int) {
   273  	sweep.g = getg()
   274  
   275  	lockInit(&sweep.lock, lockRankSweep)
   276  	lock(&sweep.lock)
   277  	sweep.parked = true
   278  	c <- 1
   279  	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
   280  
   281  	for {
   282  		// bgsweep attempts to be a "low priority" goroutine by intentionally
   283  		// yielding time. It's OK if it doesn't run, because goroutines allocating
   284  		// memory will sweep and ensure that all spans are swept before the next
   285  		// GC cycle. We really only want to run when we're idle.
   286  		//
   287  		// However, calling Gosched after each span swept produces a tremendous
   288  		// amount of tracing events, sometimes up to 50% of events in a trace. It's
   289  		// also inefficient to call into the scheduler so much because sweeping a
   290  		// single span is in general a very fast operation, taking as little as 30 ns
   291  		// on modern hardware. (See #54767.)
   292  		//
   293  		// As a result, bgsweep sweeps in batches, and only calls into the scheduler
   294  		// at the end of every batch. Furthermore, it only yields its time if there
   295  		// isn't spare idle time available on other cores. If there's available idle
   296  		// time, helping to sweep can reduce allocation latencies by getting ahead of
   297  		// the proportional sweeper and having spans ready to go for allocation.
   298  		const sweepBatchSize = 10
   299  		nSwept := 0
   300  		for sweepone() != ^uintptr(0) {
   301  			nSwept++
   302  			if nSwept%sweepBatchSize == 0 {
   303  				goschedIfBusy()
   304  			}
   305  		}
   306  		for freeSomeWbufs(true) {
   307  			// N.B. freeSomeWbufs is already batched internally.
   308  			goschedIfBusy()
   309  		}
   310  		lock(&sweep.lock)
   311  		if !isSweepDone() {
   312  			// This can happen if a GC runs between
    313  			// sweepone returning ^0 above
   314  			// and the lock being acquired.
   315  			unlock(&sweep.lock)
   316  			// This goroutine must preempt when we have no work to do
   317  			// but isSweepDone returns false because of another existing sweeper.
   318  			// See issue #73499.
   319  			goschedIfBusy()
   320  			continue
   321  		}
   322  		sweep.parked = true
   323  		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceBlockGCSweep, 1)
   324  	}
   325  }
   326  
   327  // sweepLocker acquires sweep ownership of spans.
   328  type sweepLocker struct {
   329  	// sweepGen is the sweep generation of the heap.
   330  	sweepGen uint32
   331  	valid    bool
   332  }
   333  
   334  // sweepLocked represents sweep ownership of a span.
   335  type sweepLocked struct {
   336  	*mspan
   337  }
   338  
   339  // tryAcquire attempts to acquire sweep ownership of span s. If it
   340  // successfully acquires ownership, it blocks sweep completion.
   341  func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
   342  	if !l.valid {
   343  		throw("use of invalid sweepLocker")
   344  	}
   345  	// Check before attempting to CAS.
   346  	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
   347  		return sweepLocked{}, false
   348  	}
   349  	// Attempt to acquire sweep ownership of s.
   350  	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
   351  		return sweepLocked{}, false
   352  	}
   353  	return sweepLocked{s}, true
   354  }
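
// For reference, the sweepgen values used by tryAcquire follow the
// convention documented on mspan in mheap.go: relative to the heap's
// current sweepgen, a span at sweepgen-2 needs sweeping, a span at
// sweepgen-1 is currently being swept, and a span at sweepgen is swept and
// ready to use; the +1 and +3 values cover spans cached in an mcache. The
// heap's sweepgen is incremented by 2 after every GC, which is why
// tryAcquire advances a span from sweepGen-2 to sweepGen-1.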
   355  
   356  // sweepone sweeps some unswept heap span and returns the number of pages returned
   357  // to the heap, or ^uintptr(0) if there was nothing to sweep.
   358  func sweepone() uintptr {
   359  	gp := getg()
   360  
   361  	// Increment locks to ensure that the goroutine is not preempted
    362  	// in the middle of a sweep, which would leave the span in an inconsistent state for the next GC.
   363  	gp.m.locks++
   364  
   365  	// TODO(austin): sweepone is almost always called in a loop;
   366  	// lift the sweepLocker into its callers.
   367  	sl := sweep.active.begin()
   368  	if !sl.valid {
   369  		gp.m.locks--
   370  		return ^uintptr(0)
   371  	}
   372  
   373  	// Find a span to sweep.
   374  	npages := ^uintptr(0)
   375  	var noMoreWork bool
   376  	for {
   377  		s := mheap_.nextSpanForSweep()
   378  		if s == nil {
   379  			noMoreWork = sweep.active.markDrained()
   380  			break
   381  		}
   382  		if state := s.state.get(); state != mSpanInUse {
   383  			// This can happen if direct sweeping already
   384  			// swept this span, but in that case the sweep
   385  			// generation should always be up-to-date.
   386  			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
   387  				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
   388  				throw("non in-use span in unswept list")
   389  			}
   390  			continue
   391  		}
   392  		if s, ok := sl.tryAcquire(s); ok {
   393  			// Sweep the span we found.
   394  			npages = s.npages
   395  			if s.sweep(false) {
   396  				// Whole span was freed. Count it toward the
   397  				// page reclaimer credit since these pages can
   398  				// now be used for span allocation.
   399  				mheap_.reclaimCredit.Add(npages)
   400  			} else {
   401  				// Span is still in-use, so this returned no
   402  				// pages to the heap and the span needs to
   403  				// move to the swept in-use list.
   404  				npages = 0
   405  			}
   406  			break
   407  		}
   408  	}
   409  	sweep.active.end(sl)
   410  
   411  	if noMoreWork {
   412  		// The sweep list is empty. There may still be
   413  		// concurrent sweeps running, but we're at least very
   414  		// close to done sweeping.
   415  
   416  		// Move the scavenge gen forward (signaling
   417  		// that there's new work to do) and wake the scavenger.
   418  		//
   419  		// The scavenger is signaled by the last sweeper because once
   420  		// sweeping is done, we will definitely have useful work for
   421  		// the scavenger to do, since the scavenger only runs over the
   422  		// heap once per GC cycle. This update is not done during sweep
   423  		// termination because in some cases there may be a long delay
   424  		// between sweep done and sweep termination (e.g. not enough
   425  		// allocations to trigger a GC) which would be nice to fill in
   426  		// with scavenging work.
   427  		if debug.scavtrace > 0 {
   428  			systemstack(func() {
   429  				lock(&mheap_.lock)
   430  
   431  				// Get released stats.
   432  				releasedBg := mheap_.pages.scav.releasedBg.Load()
   433  				releasedEager := mheap_.pages.scav.releasedEager.Load()
   434  
   435  				// Print the line.
   436  				printScavTrace(releasedBg, releasedEager, false)
   437  
   438  				// Update the stats.
   439  				mheap_.pages.scav.releasedBg.Add(-releasedBg)
   440  				mheap_.pages.scav.releasedEager.Add(-releasedEager)
   441  				unlock(&mheap_.lock)
   442  			})
   443  		}
   444  		scavenger.ready()
   445  	}
   446  
   447  	gp.m.locks--
   448  	return npages
   449  }
   450  
   451  // isSweepDone reports whether all spans are swept.
   452  //
   453  // Note that this condition may transition from false to true at any
   454  // time as the sweeper runs. It may transition from true to false if a
   455  // GC runs; to prevent that the caller must be non-preemptible or must
   456  // somehow block GC progress.
   457  func isSweepDone() bool {
   458  	return sweep.active.isDone()
   459  }
   460  
   461  // Returns only when span s has been swept.
   462  //
   463  //go:nowritebarrier
   464  func (s *mspan) ensureSwept() {
   465  	// Caller must disable preemption.
   466  	// Otherwise when this function returns the span can become unswept again
   467  	// (if GC is triggered on another goroutine).
   468  	gp := getg()
   469  	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
   470  		throw("mspan.ensureSwept: m is not locked")
   471  	}
   472  
   473  	// If this operation fails, then that means that there are
   474  	// no more spans to be swept. In this case, either s has already
   475  	// been swept, or is about to be acquired for sweeping and swept.
   476  	sl := sweep.active.begin()
   477  	if sl.valid {
   478  		// The caller must be sure that the span is a mSpanInUse span.
   479  		if s, ok := sl.tryAcquire(s); ok {
   480  			s.sweep(false)
   481  			sweep.active.end(sl)
   482  			return
   483  		}
   484  		sweep.active.end(sl)
   485  	}
   486  
   487  	// Unfortunately we can't sweep the span ourselves. Somebody else
   488  	// got to it first. We don't have efficient means to wait, but that's
   489  	// OK, it will be swept fairly soon.
   490  	for {
   491  		spangen := atomic.Load(&s.sweepgen)
   492  		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
   493  			break
   494  		}
   495  		osyield()
   496  	}
   497  }
   498  
   499  // sweep frees or collects finalizers for blocks not marked in the mark phase.
   500  // It clears the mark bits in preparation for the next GC round.
   501  // Returns true if the span was returned to heap.
   502  // If preserve=true, don't return it to heap nor relink in mcentral lists;
   503  // caller takes care of it.
   504  func (sl *sweepLocked) sweep(preserve bool) bool {
    505  	// It's critical that we enter this function with preemption disabled;
    506  	// the GC must not start while we are in the middle of this function.
   507  	gp := getg()
   508  	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
   509  		throw("mspan.sweep: m is not locked")
   510  	}
   511  
   512  	s := sl.mspan
   513  	if !preserve {
   514  		// We'll release ownership of this span. Nil it out to
   515  		// prevent the caller from accidentally using it.
   516  		sl.mspan = nil
   517  	}
   518  
   519  	sweepgen := mheap_.sweepgen
   520  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
   521  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   522  		throw("mspan.sweep: bad span state")
   523  	}
   524  
   525  	trace := traceAcquire()
   526  	if trace.ok() {
   527  		trace.GCSweepSpan(s.npages * pageSize)
   528  		traceRelease(trace)
   529  	}
   530  
   531  	mheap_.pagesSwept.Add(int64(s.npages))
   532  
   533  	spc := s.spanclass
   534  	size := s.elemsize
   535  
   536  	// The allocBits indicate which unmarked objects don't need to be
   537  	// processed since they were free at the end of the last GC cycle
   538  	// and were not allocated since then.
   539  	// If the allocBits index is >= s.freeindex and the bit
   540  	// is not marked then the object remains unallocated
   541  	// since the last GC.
   542  	// This situation is analogous to being on a freelist.
   543  
   544  	// Unlink & free special records for any objects we're about to free.
   545  	// Two complications here:
   546  	// 1. An object can have both finalizer and profile special records.
    547  	//    In such a case we need to queue the finalizer for execution,
    548  	//    mark the object as live, and preserve the profile special.
    549  	// 2. A tiny object can have several finalizers set up for different offsets.
    550  	//    If such an object is not marked, we need to queue all finalizers at once.
   551  	// Both 1 and 2 are possible at the same time.
   552  	hadSpecials := s.specials != nil
   553  	siter := newSpecialsIter(s)
   554  	for siter.valid() {
    555  		// A finalizer can be set for an inner byte of an object; find the object's beginning.
   556  		objIndex := uintptr(siter.s.offset) / size
   557  		p := s.base() + objIndex*size
   558  		mbits := s.markBitsForIndex(objIndex)
   559  		if !mbits.isMarked() {
   560  			// This object is not marked and has at least one special record.
   561  			// Pass 1: see if it has a finalizer.
   562  			hasFinAndRevived := false
   563  			endOffset := p - s.base() + size
   564  			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
   565  				if tmp.kind == _KindSpecialFinalizer {
   566  					// Stop freeing of object if it has a finalizer.
   567  					mbits.setMarkedNonAtomic()
   568  					hasFinAndRevived = true
   569  					break
   570  				}
   571  			}
   572  			if hasFinAndRevived {
   573  				// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
   574  				// before finalization as specified by the weak package. See the documentation
   575  				// for that package for more details.
   576  				for siter.valid() && uintptr(siter.s.offset) < endOffset {
    577  					// Find the exact byte for which the special was set up
    578  					// (as opposed to the object's beginning).
   579  					special := siter.s
   580  					p := s.base() + uintptr(special.offset)
   581  					if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
   582  						siter.unlinkAndNext()
   583  						freeSpecial(special, unsafe.Pointer(p), size)
   584  					} else {
   585  						// All other specials only apply when an object is freed,
   586  						// so just keep the special record.
   587  						siter.next()
   588  					}
   589  				}
   590  			} else {
   591  				// Pass 2: the object is truly dead, free (and handle) all specials.
   592  				for siter.valid() && uintptr(siter.s.offset) < endOffset {
    593  					// Find the exact byte for which the special was set up
    594  					// (as opposed to the object's beginning).
   595  					special := siter.s
   596  					p := s.base() + uintptr(special.offset)
   597  					siter.unlinkAndNext()
   598  					freeSpecial(special, unsafe.Pointer(p), size)
   599  				}
   600  			}
   601  		} else {
   602  			// object is still live
   603  			if siter.s.kind == _KindSpecialReachable {
   604  				special := siter.unlinkAndNext()
   605  				(*specialReachable)(unsafe.Pointer(special)).reachable = true
   606  				freeSpecial(special, unsafe.Pointer(p), size)
   607  			} else {
   608  				// keep special record
   609  				siter.next()
   610  			}
   611  		}
   612  	}
   613  	if hadSpecials && s.specials == nil {
   614  		spanHasNoSpecials(s)
   615  	}
   616  
   617  	if traceAllocFreeEnabled() || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
   618  		// Find all newly freed objects.
   619  		mbits := s.markBitsForBase()
   620  		abits := s.allocBitsForIndex(0)
   621  		for i := uintptr(0); i < uintptr(s.nelems); i++ {
   622  			if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
   623  				x := s.base() + i*s.elemsize
   624  				if traceAllocFreeEnabled() {
   625  					trace := traceAcquire()
   626  					if trace.ok() {
   627  						trace.HeapObjectFree(x)
   628  						traceRelease(trace)
   629  					}
   630  				}
   631  				if debug.clobberfree != 0 {
   632  					clobberfree(unsafe.Pointer(x), size)
   633  				}
   634  				// User arenas are handled on explicit free.
   635  				if raceenabled && !s.isUserArenaChunk {
   636  					racefree(unsafe.Pointer(x), size)
   637  				}
   638  				if msanenabled && !s.isUserArenaChunk {
   639  					msanfree(unsafe.Pointer(x), size)
   640  				}
   641  				if asanenabled && !s.isUserArenaChunk {
   642  					asanpoison(unsafe.Pointer(x), size)
   643  				}
   644  				if valgrindenabled && !s.isUserArenaChunk {
   645  					valgrindFree(unsafe.Pointer(x))
   646  				}
   647  			}
   648  			mbits.advance()
   649  			abits.advance()
   650  		}
   651  	}
   652  
   653  	// Copy over the inline mark bits if necessary.
   654  	if gcUsesSpanInlineMarkBits(s.elemsize) {
   655  		s.mergeInlineMarks(s.gcmarkBits)
   656  	}
   657  
   658  	// Check for zombie objects.
   659  	if s.freeindex < s.nelems {
   660  		// Everything < freeindex is allocated and hence
    661  		// cannot be a zombie.
   662  		//
   663  		// Check the first bitmap byte, where we have to be
   664  		// careful with freeindex.
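		// (A bit set in gcmarkBits but clear in allocBits at or above
		// freeindex is a marked-but-free object, i.e. a zombie. The shift
		// below discards the bits for objects below freeindex, which are
		// known to be allocated.)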
   665  		obj := uintptr(s.freeindex)
   666  		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
   667  			s.reportZombies()
   668  		}
   669  		// Check remaining bytes.
   670  		for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
   671  			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
   672  				s.reportZombies()
   673  			}
   674  		}
   675  	}
   676  
   677  	// Count the number of free objects in this span.
   678  	nalloc := uint16(s.countAlloc())
   679  	nfreed := s.allocCount - nalloc
   680  	if nalloc > s.allocCount {
   681  		// The zombie check above should have caught this in
   682  		// more detail.
   683  		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
   684  		throw("sweep increased allocation count")
   685  	}
   686  
   687  	s.allocCount = nalloc
   688  	s.freeindex = 0 // reset allocation index to start of span.
   689  	s.freeIndexForScan = 0
   690  	if traceEnabled() {
   691  		getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize
   692  	}
   693  
   694  	// gcmarkBits becomes the allocBits.
    695  	// Get a fresh, cleared gcmarkBits in preparation for the next GC.
   696  	s.allocBits = s.gcmarkBits
   697  	s.gcmarkBits = newMarkBits(uintptr(s.nelems))
   698  
    699  	// Refresh pinnerBits if they exist.
   700  	if s.pinnerBits != nil {
   701  		s.refreshPinnerBits()
   702  	}
   703  
   704  	// Initialize alloc bits cache.
   705  	s.refillAllocCache(0)
   706  
   707  	// Reset the object queue, if we have one.
   708  	if gcUsesSpanInlineMarkBits(s.elemsize) {
   709  		s.initInlineMarkBits()
   710  	}
   711  
    712  	// The span must be in our exclusive ownership until we update sweepgen;
   713  	// check for potential races.
   714  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
   715  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
   716  		throw("mspan.sweep: bad span state after sweep")
   717  	}
   718  	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
   719  		throw("swept cached span")
   720  	}
   721  
   722  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
   723  	// because of the potential for a concurrent free/SetFinalizer.
   724  	//
   725  	// But we need to set it before we make the span available for allocation
   726  	// (return it to heap or mcentral), because allocation code assumes that a
   727  	// span is already swept if available for allocation.
   728  	//
   729  	// Serialization point.
   730  	// At this point the mark bits are cleared and allocation ready
   731  	// to go so release the span.
   732  	atomic.Store(&s.sweepgen, sweepgen)
   733  
   734  	if s.isUserArenaChunk {
   735  		if preserve {
   736  			// This is a case that should never be handled by a sweeper that
   737  			// preserves the span for reuse.
   738  			throw("sweep: tried to preserve a user arena span")
   739  		}
   740  		if nalloc > 0 {
   741  			// There still exist pointers into the span or the span hasn't been
   742  			// freed yet. It's not ready to be reused. Put it back on the
   743  			// full swept list for the next cycle.
   744  			mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
   745  			return false
   746  		}
   747  
   748  		// It's only at this point that the sweeper doesn't actually need to look
   749  		// at this arena anymore, so subtract from pagesInUse now.
   750  		mheap_.pagesInUse.Add(-s.npages)
   751  		s.state.set(mSpanDead)
   752  
   753  		// The arena is ready to be recycled. Remove it from the quarantine list
   754  		// and place it on the ready list. Don't add it back to any sweep lists.
   755  		systemstack(func() {
   756  			// It's the arena code's responsibility to get the chunk on the quarantine
   757  			// list by the time all references to the chunk are gone.
   758  			if s.list != &mheap_.userArena.quarantineList {
   759  				throw("user arena span is on the wrong list")
   760  			}
   761  			lock(&mheap_.lock)
   762  			mheap_.userArena.quarantineList.remove(s)
   763  			mheap_.userArena.readyList.insert(s)
   764  			unlock(&mheap_.lock)
   765  		})
   766  		return false
   767  	}
   768  
   769  	if spc.sizeclass() != 0 {
   770  		// Handle spans for small objects.
   771  		if nfreed > 0 {
   772  			// Only mark the span as needing zeroing if we've freed any
   773  			// objects, because a fresh span that had been allocated into,
   774  			// wasn't totally filled, but then swept, still has all of its
   775  			// free slots zeroed.
   776  			s.needzero = 1
   777  			stats := memstats.heapStats.acquire()
   778  			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
   779  			memstats.heapStats.release()
   780  
   781  			// Count the frees in the inconsistent, internal stats.
   782  			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))
   783  		}
   784  		if !preserve {
   785  			// The caller may not have removed this span from whatever
    786  			// unswept set it's on, but taken ownership of the span for
    787  			// sweeping by updating sweepgen. If this span is still in
   788  			// an unswept set, then the mcentral will pop it off the
   789  			// set, check its sweepgen, and ignore it.
   790  			if nalloc == 0 {
   791  				// Free totally free span directly back to the heap.
   792  				mheap_.freeSpan(s)
   793  				return true
   794  			}
    795  			// Return the span to the right mcentral list.
   796  			if nalloc == s.nelems {
   797  				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
   798  			} else {
   799  				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
   800  			}
   801  		}
   802  	} else if !preserve {
   803  		// Handle spans for large objects.
   804  		if nfreed != 0 {
   805  			// Free large object span to heap.
   806  
   807  			// Count the free in the consistent, external stats.
   808  			//
   809  			// Do this before freeSpan, which might update heapStats' inHeap
   810  			// value. If it does so, then metrics that subtract object footprint
   811  			// from inHeap might overflow. See #67019.
   812  			stats := memstats.heapStats.acquire()
   813  			atomic.Xadd64(&stats.largeFreeCount, 1)
   814  			atomic.Xadd64(&stats.largeFree, int64(size))
   815  			memstats.heapStats.release()
   816  
   817  			// Count the free in the inconsistent, internal stats.
   818  			gcController.totalFree.Add(int64(size))
   819  
   820  			// NOTE(rsc,dvyukov): The original implementation of efence
   821  			// in CL 22060046 used sysFree instead of sysFault, so that
   822  			// the operating system would eventually give the memory
   823  			// back to us again, so that an efence program could run
   824  			// longer without running out of memory. Unfortunately,
   825  			// calling sysFree here without any kind of adjustment of the
   826  			// heap data structures means that when the memory does
   827  			// come back to us, we have the wrong metadata for it, either in
   828  			// the mspan structures or in the garbage collection bitmap.
   829  			// Using sysFault here means that the program will run out of
   830  			// memory fairly quickly in efence mode, but at least it won't
   831  			// have mysterious crashes due to confused memory reuse.
   832  			// It should be possible to switch back to sysFree if we also
   833  			// implement and then call some kind of mheap.deleteSpan.
   834  			if debug.efence > 0 {
   835  				s.limit = 0 // prevent mlookup from finding this span
   836  				sysFault(unsafe.Pointer(s.base()), size)
   837  			} else {
   838  				mheap_.freeSpan(s)
   839  			}
   840  			return true
   841  		}
   842  
   843  		// Add a large span directly onto the full+swept list.
   844  		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
   845  	}
   846  	return false
   847  }
   848  
   849  // reportZombies reports any marked but free objects in s and throws.
   850  //
   851  // This generally means one of the following:
   852  //
   853  // 1. User code converted a pointer to a uintptr and then back
   854  // unsafely, and a GC ran while the uintptr was the only reference to
   855  // an object.
   856  //
   857  // 2. User code (or a compiler bug) constructed a bad pointer that
   858  // points to a free slot, often a past-the-end pointer.
   859  //
   860  // 3. The GC two cycles ago missed a pointer and freed a live object,
   861  // but it was still live in the last cycle, so this GC cycle found a
   862  // pointer to that object and marked it.
   863  func (s *mspan) reportZombies() {
   864  	printlock()
   865  	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer or having race conditions? try -d=checkptr or -race)\n")
   866  	mbits := s.markBitsForBase()
   867  	abits := s.allocBitsForIndex(0)
   868  	for i := uintptr(0); i < uintptr(s.nelems); i++ {
   869  		addr := s.base() + i*s.elemsize
   870  		print(hex(addr))
   871  		alloc := i < uintptr(s.freeindex) || abits.isMarked()
   872  		if alloc {
   873  			print(" alloc")
   874  		} else {
   875  			print(" free ")
   876  		}
   877  		if mbits.isMarked() {
   878  			print(" marked  ")
   879  		} else {
   880  			print(" unmarked")
   881  		}
   882  		zombie := mbits.isMarked() && !alloc
   883  		if zombie {
   884  			print(" zombie")
   885  		}
   886  		print("\n")
   887  		if zombie {
   888  			length := s.elemsize
   889  			if length > 1024 {
   890  				length = 1024
   891  			}
   892  			hexdumpWords(addr, addr+length, nil)
   893  		}
   894  		mbits.advance()
   895  		abits.advance()
   896  	}
   897  	throw("found pointer to free object")
   898  }
   899  
   900  // deductSweepCredit deducts sweep credit for allocating a span of
   901  // size spanBytes. This must be performed *before* the span is
   902  // allocated to ensure the system has enough credit. If necessary, it
    903  // performs sweeping to prevent going into debt. If the caller will
   904  // also sweep pages (e.g., for a large allocation), it can pass a
   905  // non-zero callerSweepPages to leave that many pages unswept.
   906  //
   907  // deductSweepCredit makes a worst-case assumption that all spanBytes
   908  // bytes of the ultimately allocated span will be available for object
   909  // allocation.
   910  //
   911  // deductSweepCredit is the core of the "proportional sweep" system.
   912  // It uses statistics gathered by the garbage collector to perform
   913  // enough sweeping so that all pages are swept during the concurrent
   914  // sweep phase between GC cycles.
   915  //
   916  // mheap_ must NOT be locked.
   917  func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
   918  	if mheap_.sweepPagesPerByte == 0 {
   919  		// Proportional sweep is done or disabled.
   920  		return
   921  	}
   922  
   923  	trace := traceAcquire()
   924  	if trace.ok() {
   925  		trace.GCSweepStart()
   926  		traceRelease(trace)
   927  	}
   928  
   929  	// Fix debt if necessary.
   930  retry:
   931  	sweptBasis := mheap_.pagesSweptBasis.Load()
   932  	live := gcController.heapLive.Load()
   933  	liveBasis := mheap_.sweepHeapLiveBasis
   934  	newHeapLive := spanBytes
   935  	if liveBasis < live {
   936  		// Only do this subtraction when we don't overflow. Otherwise, pagesTarget
   937  		// might be computed as something really huge, causing us to get stuck
   938  		// sweeping here until the next mark phase.
   939  		//
   940  		// Overflow can happen here if gcPaceSweeper is called concurrently with
   941  		// sweeping (i.e. not during a STW, like it usually is) because this code
   942  		// is intentionally racy. A concurrent call to gcPaceSweeper can happen
   943  		// if a GC tuning parameter is modified and we read an older value of
   944  		// heapLive than what was used to set the basis.
   945  		//
   946  		// This state should be transient, so it's fine to just let newHeapLive
   947  		// be a relatively small number. We'll probably just skip this attempt to
   948  		// sweep.
   949  		//
   950  		// See issue #57523.
   951  		newHeapLive += uintptr(live - liveBasis)
   952  	}
   953  	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
   954  	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
   955  		if sweepone() == ^uintptr(0) {
   956  			mheap_.sweepPagesPerByte = 0
   957  			break
   958  		}
   959  		if mheap_.pagesSweptBasis.Load() != sweptBasis {
   960  			// Sweep pacing changed. Recompute debt.
   961  			goto retry
   962  		}
   963  	}
   964  
   965  	trace = traceAcquire()
   966  	if trace.ok() {
   967  		trace.GCSweepDone()
   968  		traceRelease(trace)
   969  	}
   970  }
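
// For illustration (these figures are made up, not from the runtime):
// suppose gcPaceSweeper below has set sweepPagesPerByte = 0.0001 and
// sweepHeapLiveBasis = 100<<20, heapLive has since grown to 101<<20, and a
// goroutine is about to allocate an 8 KiB span with callerSweepPages = 0.
// Then newHeapLive = 1<<20 + 8192 = 1056768 bytes and pagesTarget =
// 0.0001 * 1056768, about 105 pages, so the loop above keeps calling
// sweepone until pagesSwept has advanced at least that far past
// pagesSweptBasis.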
   971  
   972  // clobberfree sets the memory content at x to bad content, for debugging
   973  // purposes.
   974  func clobberfree(x unsafe.Pointer, size uintptr) {
   975  	// size (span.elemsize) is always a multiple of 4.
   976  	for i := uintptr(0); i < size; i += 4 {
   977  		*(*uint32)(add(x, i)) = 0xdeadbeef
   978  	}
   979  }
   980  
   981  // gcPaceSweeper updates the sweeper's pacing parameters.
   982  //
   983  // Must be called whenever the GC's pacing is updated.
   984  //
   985  // The world must be stopped, or mheap_.lock must be held.
   986  func gcPaceSweeper(trigger uint64) {
   987  	assertWorldStoppedOrLockHeld(&mheap_.lock)
   988  
   989  	// Update sweep pacing.
   990  	if isSweepDone() {
   991  		mheap_.sweepPagesPerByte = 0
   992  	} else {
   993  		// Concurrent sweep needs to sweep all of the in-use
   994  		// pages by the time the allocated heap reaches the GC
   995  		// trigger. Compute the ratio of in-use pages to sweep
   996  		// per byte allocated, accounting for the fact that
   997  		// some might already be swept.
   998  		heapLiveBasis := gcController.heapLive.Load()
   999  		heapDistance := int64(trigger) - int64(heapLiveBasis)
  1000  		// Add a little margin so rounding errors and
  1001  		// concurrent sweep are less likely to leave pages
  1002  		// unswept when GC starts.
  1003  		heapDistance -= 1024 * 1024
  1004  		if heapDistance < pageSize {
  1005  			// Avoid setting the sweep ratio extremely high
  1006  			heapDistance = pageSize
  1007  		}
  1008  		pagesSwept := mheap_.pagesSwept.Load()
  1009  		pagesInUse := mheap_.pagesInUse.Load()
  1010  		sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
  1011  		if sweepDistancePages <= 0 {
  1012  			mheap_.sweepPagesPerByte = 0
  1013  		} else {
  1014  			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
  1015  			mheap_.sweepHeapLiveBasis = heapLiveBasis
  1016  			// Write pagesSweptBasis last, since this
  1017  			// signals concurrent sweeps to recompute
  1018  			// their debt.
  1019  			mheap_.pagesSweptBasis.Store(pagesSwept)
  1020  		}
  1021  	}
  1022  }
  1023  
