// Source file: src/runtime/heapdump.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Implementation of runtime/debug.WriteHeapDump. Writes all
     6  // objects in the heap plus additional info (roots, threads,
     7  // finalizers, etc.) to a file.
     8  
     9  // The format of the dumped file is described at
    10  // https://golang.org/s/go15heapdump.
    11  
    12  package runtime
    13  
    14  import (
    15  	"internal/abi"
    16  	"internal/goarch"
    17  	"unsafe"
    18  )
    19  
    20  //go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
    21  func runtime_debug_WriteHeapDump(fd uintptr) {
    22  	stw := stopTheWorld(stwWriteHeapDump)
    23  
    24  	// Keep m on this G's stack instead of the system stack.
    25  	// Both readmemstats_m and writeheapdump_m have pretty large
    26  	// peak stack depths and we risk blowing the system stack.
    27  	// This is safe because the world is stopped, so we don't
    28  	// need to worry about anyone shrinking and therefore moving
    29  	// our stack.
    30  	var m MemStats
    31  	systemstack(func() {
    32  		// Call readmemstats_m here instead of deeper in
    33  		// writeheapdump_m because we might blow the system stack
    34  		// otherwise.
    35  		readmemstats_m(&m)
    36  		writeheapdump_m(fd, &m)
    37  	})
    38  
    39  	startTheWorld(stw)
    40  }
    41  
// Field kinds and record tags of the heap dump format
// (described at https://golang.org/s/go15heapdump).
const (
	// Field kinds, used in the field lists that follow object and
	// stack-frame records. fieldKindEol terminates a field list.
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2
	fieldKindEface = 3
	// Record tags: each record in the dump starts with one of these.
	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)
    66  
var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte  // scratch buffer reused by makeheapobjbv; freed at the end of writeheapdump_m.

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte // bytes accepted by dwrite but not yet written to dumpfd
var nbuf uintptr      // number of valid bytes currently in buf
// dwrite appends len bytes starting at data to the dump output,
// buffering small writes in buf and writing to dumpfd only when the
// buffer would overflow.
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	// Fast path: the data fits in the remaining buffer space.
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	// Buffer is too full; flush what we have so far.
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		// Data is at least a full buffer; bypass buf and write it directly.
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		// Start the (now empty) buffer over with this data.
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}
    97  
    98  func dwritebyte(b byte) {
    99  	dwrite(unsafe.Pointer(&b), 1)
   100  }
   101  
// flush writes any buffered dump bytes to dumpfd and resets the buffer.
func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}
   106  
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256 // must be a power of two: indexed by Hash & (typeCacheBuckets-1)
	typeCacheAssoc   = 4   // entries per bucket
)

// typeCacheBucket holds one bucket's most-recently-used list of
// already-dumped types.
type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket
   123  
   124  // dump a uint64 in a varint format parseable by encoding/binary.
   125  func dumpint(v uint64) {
   126  	var buf [10]byte
   127  	var n int
   128  	for v >= 0x80 {
   129  		buf[n] = byte(v | 0x80)
   130  		n++
   131  		v >>= 7
   132  	}
   133  	buf[n] = byte(v)
   134  	n++
   135  	dwrite(unsafe.Pointer(&buf), uintptr(n))
   136  }
   137  
   138  func dumpbool(b bool) {
   139  	if b {
   140  		dumpint(1)
   141  	} else {
   142  		dumpint(0)
   143  	}
   144  }
   145  
// dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}
   151  
   152  func dumpslice(b []byte) {
   153  	dumpint(uint64(len(b)))
   154  	if len(b) > 0 {
   155  		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
   156  	}
   157  }
   158  
// dumpstr writes s as a varint length followed by the string bytes.
func dumpstr(s string) {
	dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}
   162  
// dump information for a type.
//
// Emits a tagType record the first time each type is seen; a small
// set-associative cache (typecache) suppresses most duplicates, though
// a bucket overflow may cause a type to be dumped more than once.
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.Hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	// Shift the bucket down (evicting the least-recently-used entry)
	// and install t at the front.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.Size_))
	rt := toRType(t)
	if x := t.Uncommon(); x == nil || rt.nameOff(x.PkgPath).Name() == "" {
		// No package path available: the plain type string is the name.
		dumpstr(rt.string())
	} else {
		// Qualified name "<pkgpath>.<name>", emitted as a single
		// length-prefixed string without building a temporary.
		pkgpath := rt.nameOff(x.PkgPath).Name()
		name := rt.name()
		dumpint(uint64(uintptr(len(pkgpath)) + 1 + uintptr(len(name))))
		dwrite(unsafe.Pointer(unsafe.StringData(pkgpath)), uintptr(len(pkgpath)))
		dwritebyte('.')
		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
	}
	// True when interface values of this type store the data
	// indirectly, or when the type contains pointers.
	dumpbool(t.Kind_&abi.KindDirectIface == 0 || t.PtrBytes != 0)
}
   210  
// dump an object: its address, contents, and pointer fields (from bv).
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}
   218  
// dumpotherroot records a miscellaneous GC root: a description string
// and the address it points to.
func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}
   224  
// dumpfinalizer records a registered finalizer: the object it is
// attached to, the finalizer closure and entry PC, and the fint/ot
// type information.
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   233  
// childInfo carries information from a callee frame up to its caller
// while walking a goroutine's stack, describing the callee's view of
// the shared outargs region.
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
   243  
   244  // dump kinds & offsets of interesting fields in bv.
   245  func dumpbv(cbv *bitvector, offset uintptr) {
   246  	for i := uintptr(0); i < uintptr(cbv.n); i++ {
   247  		if cbv.ptrbit(i) == 1 {
   248  			dumpint(fieldKindPtr)
   249  			dumpint(uint64(offset + i*goarch.PtrSize))
   250  		}
   251  	}
   252  }
   253  
   254  func dumpframe(s *stkframe, child *childInfo) {
   255  	f := s.fn
   256  
   257  	// Figure out what we can about our stack map
   258  	pc := s.pc
   259  	pcdata := int32(-1) // Use the entry map at function entry
   260  	if pc != f.entry() {
   261  		pc--
   262  		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, pc)
   263  	}
   264  	if pcdata == -1 {
   265  		// We do not have a valid pcdata value but there might be a
   266  		// stackmap for this function. It is likely that we are looking
   267  		// at the function prologue, assume so and hope for the best.
   268  		pcdata = 0
   269  	}
   270  	stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))
   271  
   272  	var bv bitvector
   273  	if stkmap != nil && stkmap.n > 0 {
   274  		bv = stackmapdata(stkmap, pcdata)
   275  	} else {
   276  		bv.n = -1
   277  	}
   278  
   279  	// Dump main body of stack frame.
   280  	dumpint(tagStackFrame)
   281  	dumpint(uint64(s.sp))                              // lowest address in frame
   282  	dumpint(uint64(child.depth))                       // # of frames deep on the stack
   283  	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
   284  	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
   285  	dumpint(uint64(f.entry()))
   286  	dumpint(uint64(s.pc))
   287  	dumpint(uint64(s.continpc))
   288  	name := funcname(f)
   289  	if name == "" {
   290  		name = "unknown function"
   291  	}
   292  	dumpstr(name)
   293  
   294  	// Dump fields in the outargs section
   295  	if child.args.n >= 0 {
   296  		dumpbv(&child.args, child.argoff)
   297  	} else {
   298  		// conservative - everything might be a pointer
   299  		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
   300  			dumpint(fieldKindPtr)
   301  			dumpint(uint64(off))
   302  		}
   303  	}
   304  
   305  	// Dump fields in the local vars section
   306  	if stkmap == nil {
   307  		// No locals information, dump everything.
   308  		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
   309  			dumpint(fieldKindPtr)
   310  			dumpint(uint64(off))
   311  		}
   312  	} else if stkmap.n < 0 {
   313  		// Locals size information, dump just the locals.
   314  		size := uintptr(-stkmap.n)
   315  		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
   316  			dumpint(fieldKindPtr)
   317  			dumpint(uint64(off))
   318  		}
   319  	} else if stkmap.n > 0 {
   320  		// Locals bitmap information, scan just the pointers in
   321  		// locals.
   322  		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
   323  	}
   324  	dumpint(fieldKindEol)
   325  
   326  	// Record arg info for parent.
   327  	child.argoff = s.argp - s.fp
   328  	child.arglen = s.argBytes()
   329  	child.sp = (*uint8)(unsafe.Pointer(s.sp))
   330  	child.depth++
   331  	stkmap = (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
   332  	if stkmap != nil {
   333  		child.args = stackmapdata(stkmap, pcdata)
   334  	} else {
   335  		child.args.n = -1
   336  	}
   337  	return
   338  }
   339  
// dumpgoroutine writes a tagGoroutine record for gp, then dumps its
// stack frames (via dumpframe) and its defer and panic records.
func dumpgoroutine(gp *g) {
	// Pick the resume point: for a goroutine blocked in a syscall use
	// the saved syscall sp/pc, otherwise the scheduler-saved context.
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(gp.goid)
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1 // no arg map yet for the innermost frame
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	var u unwinder
	for u.initAt(pc, sp, lr, gp, 0); u.valid(); u.next() {
		dumpframe(&u.frame, &child)
	}

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		fn := *(**funcval)(unsafe.Pointer(&d.fn))
		dumpint(uint64(uintptr(unsafe.Pointer(fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(eface.data)))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
   406  
// dumpgs dumps every goroutine that is not dead. The world must be
// stopped, so no goroutine can be in a transient scan or running state.
func dumpgs() {
	assertWorldStopped()

	// goroutines & stacks
	forEachG(func(gp *g) {
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	})
}
   426  
// finq_callback records one queued (ready-to-run) finalizer; it is
// invoked for each entry by iterate_finq in dumproots.
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fint))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
   435  
// dumproots dumps the GC roots: the data and bss segments with their
// pointer masks, per-span finalizer specials, and the queued-finalizer
// list.
func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// TODO(mwhudson): dump datamask etc from all objects
	// data segment
	dumpint(tagData)
	dumpint(uint64(firstmoduledata.data))
	dumpmemrange(unsafe.Pointer(firstmoduledata.data), firstmoduledata.edata-firstmoduledata.data)
	dumpfields(firstmoduledata.gcdatamask)

	// bss segment
	dumpint(tagBSS)
	dumpint(uint64(firstmoduledata.bss))
	dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss)
	dumpfields(firstmoduledata.gcbssmask)

	// mspan.types
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}
   471  
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
// Reused (and re-zeroed) for each span by dumpobjs.
var freemark [_PageSize / 8]bool
   475  
// dumpobjs dumps every allocated (non-free) object in every in-use
// span, with a pointer bitmap computed by makeheapobjbv.
func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		// Mark the free slots of this span so we skip them below.
		for freeIndex := uint16(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(uintptr(freeIndex)) {
				freemark[freeIndex] = true
			}
		}

		// Dump each live object; clear freemark entries as we go so
		// the array is zeroed again for the next span.
		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
   506  
// dumpparams writes the tagParams record: byte order, pointer size,
// heap arena address range, GOARCH, Go version, and CPU count.
func dumpparams() {
	dumpint(tagParams)
	// Detect byte order by storing 1 in a uintptr and inspecting its
	// first byte: 1 means this machine is little-endian. The dumped
	// flag is the "big-endian" bit.
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(goarch.PtrSize)
	// Compute the smallest address range covering all mapped heap
	// arenas (two-level arena index: L1 x L2).
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(goarch.GOARCH)
	dumpstr(buildVersion)
	dumpint(uint64(ncpu))
}
   540  
// itab_callback dumps one itab and (if not already dumped) its
// concrete type; invoked for each itab by iterate_itabs.
func itab_callback(tab *itab) {
	t := tab.Type
	// Ensure the referenced type record exists before the itab record.
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}
   548  
// dumpitabs dumps a tagItab record for every itab in the runtime.
func dumpitabs() {
	iterate_itabs(itab_callback)
}
   552  
   553  func dumpms() {
   554  	for mp := allm; mp != nil; mp = mp.alllink {
   555  		dumpint(tagOSThread)
   556  		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
   557  		dumpint(uint64(mp.id))
   558  		dumpint(mp.procid)
   559  	}
   560  }
   561  
// dumpmemstats writes the tagMemStats record from the already-read
// MemStats snapshot m.
//
//go:systemstack
func dumpmemstats(m *MemStats) {
	assertWorldStopped()

	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	// PauseNs is a fixed 256-entry circular buffer; dump all of it.
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}
   599  
// dumpmemprof_callback dumps one memory-profile bucket (tagMemProf):
// its size class, symbolized call stack, and alloc/free counts.
// Invoked per bucket by iterate_memprof.
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	// View the stack as a slice-like array; nstk bounds the accesses.
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			// No symbol info for this PC: format "(0x<hex>)" by
			// writing the characters backwards from the end of buf.
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?") // unknown file
			dumpint(0)   // unknown line
		} else {
			dumpstr(funcname(f))
			// For frames above the innermost, the recorded PC is the
			// return address; back up one to land inside the call.
			if i > 0 && pc > f.entry() {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
   646  
// dumpmemprof dumps the memory profile: every profile bucket, followed
// by a tagAllocSample record for each profiled allocation still live
// in the heap (per-span profile specials).
func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}
   668  
// dumphdr is the header/version line that starts every dump file.
var dumphdr = []byte("go1.7 heap dump\n")

// mdump writes the complete dump: header, params, types/itabs,
// objects, goroutines, threads, roots, memstats, and profile data,
// terminated by tagEOF and a final flush.
func mdump(m *MemStats) {
	assertWorldStopped()

	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			s.ensureSwept()
		}
	}
	// Start with an empty type cache so every type gets dumped at
	// least once for this file.
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpitabs()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats(m)
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
   693  
// writeheapdump_m performs the dump on behalf of WriteHeapDump: it
// parks the user goroutine in a waiting state, directs dump output to
// fd, runs mdump, then releases the temp buffer and restores the
// goroutine's status. The world must be stopped.
func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	gp := getg()
	casGToWaiting(gp.m.curg, _Grunning, waitReasonDumpingHeap)

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		// Free the scratch buffer allocated by makeheapobjbv.
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}
   715  
// dumpint() the kind & offset of each field in an object,
// terminated by fieldKindEol.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
   721  
// makeheapobjbv builds a pointer bitvector for the heap object of the
// given size at address p, with one bit per pointer-sized word. The
// bits are stored in the package-level tmpbuf scratch buffer, which is
// grown (via sysAlloc) as needed and freed later by writeheapdump_m.
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / goarch.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		// Note: this p shadows the parameter; it is the new buffer.
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	clear(tmpbuf[:nptr/8+1])
	s := spanOf(p)
	tp := s.typePointersOf(p, size)
	for {
		// next returns 0 when there are no more pointers in [p, p+size).
		var addr uintptr
		if tp, addr = tp.next(p + size); addr == 0 {
			break
		}
		i := (addr - p) / goarch.PtrSize
		tmpbuf[i/8] |= 1 << (i % 8)
	}
	return bitvector{int32(nptr), &tmpbuf[0]}
}
   750  

// (source viewer footer: "View as plain text")