Source file src/runtime/mcheckmark.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // GC checkmarks
     6  //
     7  // In a concurrent garbage collector, one worries about failing to mark
     8  // a live object due to mutations without write barriers or bugs in the
     9  // collector implementation. As a sanity check, the GC has a 'checkmark'
    10  // mode that retraverses the object graph with the world stopped, to make
    11  // sure that everything that should be marked is marked.
    12  
    13  package runtime
    14  
    15  import (
    16  	"internal/goarch"
    17  	"internal/runtime/atomic"
    18  	"internal/runtime/sys"
    19  	"unsafe"
    20  )
    21  
// A checkmarksMap stores the GC marks in "checkmarks" mode. It is a
// per-arena bitmap with a bit for every word in the arena. The mark
// is stored on the bit corresponding to the first word of the marked
// allocation.
type checkmarksMap struct {
	_ sys.NotInHeap // not heap-allocated: startCheckmarks obtains these via persistentalloc.
	// b holds one bit per pointer-sized word of the arena, packed 8 per byte.
	b [heapArenaBytes / goarch.PtrSize / 8]uint8
}
    30  
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
//
// It is flipped only by startCheckmarks/endCheckmarks; both are invoked
// from runCheckmark, which requires the world to be stopped, so plain
// (non-atomic) accesses are sufficient.
var useCheckmark = false
    34  
    35  // startCheckmarks prepares for the checkmarks phase.
    36  //
    37  // The world must be stopped.
    38  func startCheckmarks() {
    39  	assertWorldStopped()
    40  
    41  	// Clear all checkmarks.
    42  	clearCheckmarks := func(ai arenaIdx) {
    43  		arena := mheap_.arenas[ai.l1()][ai.l2()]
    44  		bitmap := arena.checkmarks
    45  
    46  		if bitmap == nil {
    47  			// Allocate bitmap on first use.
    48  			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
    49  			if bitmap == nil {
    50  				throw("out of memory allocating checkmarks bitmap")
    51  			}
    52  			arena.checkmarks = bitmap
    53  		} else {
    54  			// Otherwise clear the existing bitmap.
    55  			clear(bitmap.b[:])
    56  		}
    57  	}
    58  	for _, ai := range mheap_.heapArenas {
    59  		clearCheckmarks(ai)
    60  	}
    61  	for _, ai := range mheap_.userArenaArenas {
    62  		clearCheckmarks(ai)
    63  	}
    64  
    65  	// Enable checkmarking.
    66  	useCheckmark = true
    67  }
    68  
// endCheckmarks ends the checkmarks phase.
func endCheckmarks() {
	// All checkmark mark work must have been drained before turning the
	// mode off; anything left queued indicates the caller didn't finish
	// the checkmark drain.
	if gcMarkWorkAvailable(nil) {
		throw("GC work not flushed")
	}
	useCheckmark = false
}
    76  
// setCheckmark throws if marking object is a checkmarks violation,
// and otherwise sets obj's checkmark. It returns true if obj was
// already checkmarked.
//
// obj is the address of the object, found at *(base+off); base/off are
// used purely for diagnostics. mbits are obj's regular mark bits from
// the concurrent mark phase.
func setCheckmark(obj, base, off uintptr, mbits markBits) bool {
	if !mbits.isMarked() {
		// Checkmarking reached an object that the concurrent mark
		// phase failed to mark. Dump as much context as possible,
		// then crash.
		printlock()
		print("runtime: checkmarks found unexpected unmarked object obj=", hex(obj), "\n")
		print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

		// Dump the source (base) object
		gcDumpObject("base", base, off)

		// Dump the object
		gcDumpObject("obj", obj, ^uintptr(0))

		getg().m.traceback = 2
		throw("checkmark found unmarked object")
	}
	bytep, mask := getCheckmark(obj)
	if bytep == nil {
		// obj is not a heap pointer; there is no checkmark bit for it.
		return false
	}
	if atomic.Load8(bytep)&mask != 0 {
		// Already checkmarked.
		return true
	}
	atomic.Or8(bytep, mask)
	return false
}
   106  
   107  func getCheckmark(obj uintptr) (bytep *byte, mask uint8) {
   108  	ai := arenaIndex(obj)
   109  	arena := mheap_.arenas[ai.l1()][ai.l2()]
   110  	if arena == nil {
   111  		// Non-heap pointer.
   112  		return nil, 0
   113  	}
   114  	wordIdx := (obj - alignDown(obj, heapArenaBytes)) / goarch.PtrSize
   115  	arenaWord := wordIdx / 8
   116  	mask = byte(1 << (wordIdx % 8))
   117  	bytep = &arena.checkmarks.b[arenaWord]
   118  	return bytep, mask
   119  }
   120  
// runCheckmark runs a full non-parallel, stop-the-world mark using
// checkmark bits, to check that we didn't forget to mark anything
// during the concurrent mark process.
//
// prepareRootSet seeds the supplied gcWork with the root set for this
// checkmark pass.
//
// The world must be stopped to call runCheckmark.
func runCheckmark(prepareRootSet func(*gcWork)) {
	assertWorldStopped()

	// Turn off gcwaiting because that will force
	// gcDrain to return early if this goroutine
	// happens to have its preemption flag set.
	// This is fine because the world is stopped.
	// Restore it after we're done just to be safe.
	sched.gcwaiting.Store(false)
	startCheckmarks()
	gcResetMarkState()
	// Use the current P's work buffer; the caller seeds it with roots.
	gcw := &getg().m.p.ptr().gcw
	prepareRootSet(gcw)
	// Drain all queued mark work to completion (no drain flags).
	gcDrain(gcw, 0)
	// Flush the P's write barrier buffer and return the work buffer, so
	// that no mark work is left outstanding — endCheckmarks throws if
	// any remains.
	wbBufFlush1(getg().m.p.ptr())
	gcw.dispose()
	endCheckmarks()
	sched.gcwaiting.Store(true)
}
   145  
// checkFinalizersAndCleanups uses checkmarks to check for potential issues
// with the program's use of cleanups and finalizers.
//
// For each finalizer/cleanup special it runs a separate checkmark GC
// rooted at just that special. If the object the special is attached to
// ends up checkmarked, the object is reachable from its own
// finalizer/cleanup (a cycle that prevents reclamation). It also flags
// objects sharing a tiny allocation block with other values. If any
// issue is found, a report is printed and the runtime throws.
//
// The world must be stopped.
func checkFinalizersAndCleanups() {
	assertWorldStopped()

	// Bit flags for the kinds of issues a report can carry.
	const (
		reportCycle = 1 << iota // object reachable from its own finalizer/cleanup
		reportTiny              // object shares a tiny block with other values
	)

	// A report records one problematic special: the issue flags, the
	// object address, and the special itself.
	type report struct {
		issues int
		ptr    uintptr
		sp     *special
	}
	var reports [50]report
	var nreports int
	var more bool             // set when reports overflows; noted in the output
	var lastTinyBlock uintptr // base of the most recently seen tiny-block special

	forEachSpecial(func(p uintptr, s *mspan, sp *special) bool {
		// N.B. The tiny block specials are sorted first in the specials list.
		if sp.kind == _KindSpecialTinyBlock {
			lastTinyBlock = s.base() + sp.offset
			return true
		}

		// We only care about finalizers and cleanups.
		if sp.kind != _KindSpecialFinalizer && sp.kind != _KindSpecialCleanup {
			return true
		}

		// Run a checkmark GC using this cleanup and/or finalizer as a root.
		if debug.checkfinalizers > 1 {
			print("Scan trace for cleanup/finalizer on ", hex(p), ":\n")
		}
		runCheckmark(func(gcw *gcWork) {
			switch sp.kind {
			case _KindSpecialFinalizer:
				gcScanFinalizer((*specialfinalizer)(unsafe.Pointer(sp)), s, gcw)
			case _KindSpecialCleanup:
				gcScanCleanup((*specialCleanup)(unsafe.Pointer(sp)), gcw)
			}
		})
		if debug.checkfinalizers > 1 {
			println()
		}

		// Now check to see if the object the special is attached to was marked.
		// The roots above do not directly mark p, so if it is marked, then p
		// must be reachable from the finalizer and/or cleanup, preventing
		// reclamation.
		bytep, mask := getCheckmark(p)
		if bytep == nil {
			return true
		}
		var issues int
		if atomic.Load8(bytep)&mask != 0 {
			issues |= reportCycle
		}
		// If p falls inside the most recent tiny block, it shares that
		// block with other (possibly long-lived) values.
		if p >= lastTinyBlock && p < lastTinyBlock+maxTinySize {
			issues |= reportTiny
		}
		if issues != 0 {
			if nreports >= len(reports) {
				// Out of report space; remember that and stop iterating.
				more = true
				return false
			}
			reports[nreports] = report{issues, p, sp}
			nreports++
		}
		return true
	})

	if nreports > 0 {
		// Print a human-readable summary, grouping consecutive reports
		// for the same object, then crash.
		lastPtr := uintptr(0)
		println("WARNING: LIKELY CLEANUP/FINALIZER ISSUES")
		println()
		for _, r := range reports[:nreports] {
			var ctx *specialCheckFinalizer
			var kind string
			if r.sp.kind == _KindSpecialFinalizer {
				kind = "finalizer"
				ctx = getCleanupContext(r.ptr, 0)
			} else {
				kind = "cleanup"
				ctx = getCleanupContext(r.ptr, ((*specialCleanup)(unsafe.Pointer(r.sp))).id)
			}

			// N.B. reports is sorted 'enough' that cleanups/finalizers on the same pointer will
			// appear consecutively because the specials list is sorted.
			if lastPtr != r.ptr {
				// First report for this object: describe the object itself.
				if lastPtr != 0 {
					println()
				}
				print("Value of type ", toRType(ctx.ptrType).string(), " at ", hex(r.ptr), "\n")
				if r.issues&reportCycle != 0 {
					if r.sp.kind == _KindSpecialFinalizer {
						println("  is reachable from finalizer")
					} else {
						println("  is reachable from cleanup or cleanup argument")
					}
				}
				if r.issues&reportTiny != 0 {
					println("  is in a tiny block with other (possibly long-lived) values")
				}
				if r.issues&reportTiny != 0 && r.issues&reportCycle != 0 {
					if r.sp.kind == _KindSpecialFinalizer {
						println("  may be in the same tiny block as finalizer")
					} else {
						println("  may be in the same tiny block as cleanup or cleanup argument")
					}
				}
			}
			println()

			// Where the finalizer/cleanup function itself points...
			println("Has", kind, "at", hex(uintptr(unsafe.Pointer(r.sp))))
			funcInfo := findfunc(ctx.funcPC)
			if funcInfo.valid() {
				file, line := funcline(funcInfo, ctx.funcPC)
				print("  ", funcname(funcInfo), "()\n")
				print("      ", file, ":", line, " +", hex(ctx.funcPC-funcInfo.entry()), "\n")
			} else {
				print("  <bad pc ", hex(ctx.funcPC), ">\n")
			}

			// ...and where the special was created (attached).
			println("created at: ")
			createInfo := findfunc(ctx.createPC)
			if createInfo.valid() {
				file, line := funcline(createInfo, ctx.createPC)
				print("  ", funcname(createInfo), "()\n")
				print("      ", file, ":", line, " +", hex(ctx.createPC-createInfo.entry()), "\n")
			} else {
				print("  <bad pc ", hex(ctx.createPC), ">\n")
			}

			lastPtr = r.ptr
		}
		println()
		if more {
			println("... too many potential issues ...")
		}
		throw("detected possible issues with cleanups and/or finalizers")
	}
}
   292  
// forEachSpecial is an iterator over all specials.
//
// Used by debug.checkfinalizers.
//
// yield is called with the address the special refers to (span base +
// special offset), the span, and the special. Iteration stops early if
// yield returns false.
//
// The world must be stopped.
func forEachSpecial(yield func(p uintptr, s *mspan, sp *special) bool) {
	assertWorldStopped()

	// Find the arena and page index into that arena for this shard.
	for _, ai := range mheap_.markArenas {
		ha := mheap_.arenas[ai.l1()][ai.l2()]

		// Construct slice of bitmap which we'll iterate over.
		for i := range ha.pageSpecials[:] {
			// Find set bits, which correspond to spans with specials.
			specials := atomic.Load8(&ha.pageSpecials[i])
			if specials == 0 {
				continue
			}
			for j := uint(0); j < 8; j++ {
				if specials&(1<<j) == 0 {
					continue
				}
				// Find the span for this bit.
				//
				// This value is guaranteed to be non-nil because having
				// specials implies that the span is in-use, and since we're
				// currently marking we can be sure that we don't have to worry
				// about the span being freed and re-used.
				s := ha.spans[uint(i)*8+j]

				// Walk the span's specials list. No lock is needed:
				// the world is stopped (asserted above), so the list
				// cannot change underneath us.
				for sp := s.specials; sp != nil; sp = sp.next {
					if !yield(s.base()+sp.offset, s, sp) {
						return
					}
				}
			}
		}
	}
}
   335  

View as plain text