// Source file: src/runtime/debuglog.go
     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // This file provides an internal debug logging facility. The debug
     6  // log is a lightweight, in-memory, per-M ring buffer. By default, the
     7  // runtime prints the debug log on panic.
     8  //
     9  // To print something to the debug log, call dlog to obtain a dlogger
    10  // and use the methods on that to add values. The values will be
    11  // space-separated in the output (much like println).
    12  //
    13  // This facility can be enabled by passing -tags debuglog when
    14  // building. Without this tag, dlog calls compile to nothing.
    15  //
    16  // Implementation notes
    17  //
    18  // There are two implementations of the dlog interface: dloggerImpl and
    19  // dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
    20  // to one or the other depending on the debuglog build tag. However, both types
    21  // always exist and are always built. This helps ensure we compile as much of
    22  // the implementation as possible in the default build configuration, while also
    23  // enabling us to achieve good test coverage of the real debuglog implementation
    24  // even when the debuglog build tag is not set.
    25  
    26  package runtime
    27  
    28  import (
    29  	"internal/abi"
    30  	"internal/byteorder"
    31  	"internal/runtime/atomic"
    32  	"internal/runtime/sys"
    33  	"unsafe"
    34  )
    35  
// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
// (2 KiB with the default 16 KiB buffer; see debugLogStringOverflow).
const debugLogStringLimit = debugLogBytes / 8
    44  
    45  // dlog returns a debug logger. The caller can use methods on the
    46  // returned logger to add values, which will be space-separated in the
    47  // final output, much like println. The caller must call end() to
    48  // finish the message.
    49  //
    50  // dlog can be used from highly-constrained corners of the runtime: it
    51  // is safe to use in the signal handler, from within the write
    52  // barrier, from within the stack implementation, and in places that
    53  // must be recursively nosplit.
    54  //
    55  // This will be compiled away if built without the debuglog build tag.
    56  // However, argument construction may not be. If any of the arguments
    57  // are not literals or trivial expressions, consider protecting the
    58  // call with "if dlogEnabled".
    59  //
    60  //go:nosplit
    61  //go:nowritebarrierrec
    62  func dlog() dlogger {
    63  	// dlog1 is defined to either dlogImpl or dlogFake.
    64  	return dlog1()
    65  }
    66  
    67  //go:nosplit
    68  //go:nowritebarrierrec
    69  func dlogFake() dloggerFake {
    70  	return dloggerFake{}
    71  }
    72  
// dlogImpl acquires a dloggerImpl for the current M and begins a new
// record in its ring buffer, writing the record's timestamp and P ID.
// The caller appends fields and then calls end() to commit the record.
//
//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		// Claim the first unowned logger on the list; CAS on owned
		// resolves races with other Ms doing the same scan.
		for l1 := all; l1 != nil; l1 = l1.allLink {
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header. The actual size is filled
	// in by end() once the record is complete.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header: tick/nano deltas from the last sync
	// packet, then the current P's ID (or -1 if no P).
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}
   141  
// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
	_ sys.NotInHeap
	// w is the per-logger ring-buffer writer holding the log data.
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dloggerImpl

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned atomic.Uint32
}

// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dloggerImpl
   162  
// A dloggerFake is a no-op implementation of dlogger. Every method
// compiles to nothing, so dlog calls vanish without the debuglog tag.
type dloggerFake struct{}

// end is a no-op; there is no record to commit.
//
//go:nosplit
func (l dloggerFake) end() {}
   168  
// end commits the record started by dlogImpl and releases the logger,
// preferring the per-M cache and falling back to the global pool.
//
//go:nosplit
func (l *dloggerImpl) end() {
	// Fill in framing header. r.end still marks the start of this
	// record, so the difference from the write cursor is its size.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		// Size didn't fit in the 16-bit frame header.
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	l.owned.Store(0)
}
   188  
// Field type tags. Each field in a record starts with one of these
// bytes, followed by type-specific data (see debugLogWriter).
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt         // zig-zag varint
	debugLogUint        // varint
	debugLogHex         // varint, printed in hex
	debugLogPtr         // varint of the pointer value
	debugLogString      // varint length + raw bytes
	debugLogConstString // varint length + varint offset from etext
	debugLogStringOverflow // varint count of truncated bytes

	debugLogPC        // varint PC, symbolized when printed
	debugLogTraceback // varint count + that many varint PCs
)
   204  
   205  //go:nosplit
   206  func (l dloggerFake) b(x bool) dloggerFake { return l }
   207  
   208  //go:nosplit
   209  func (l *dloggerImpl) b(x bool) *dloggerImpl {
   210  	if x {
   211  		l.w.byte(debugLogBoolTrue)
   212  	} else {
   213  		l.w.byte(debugLogBoolFalse)
   214  	}
   215  	return l
   216  }
   217  
   218  //go:nosplit
   219  func (l dloggerFake) i(x int) dloggerFake { return l }
   220  
   221  //go:nosplit
   222  func (l *dloggerImpl) i(x int) *dloggerImpl {
   223  	return l.i64(int64(x))
   224  }
   225  
   226  //go:nosplit
   227  func (l dloggerFake) i8(x int8) dloggerFake { return l }
   228  
   229  //go:nosplit
   230  func (l *dloggerImpl) i8(x int8) *dloggerImpl {
   231  	return l.i64(int64(x))
   232  }
   233  
   234  //go:nosplit
   235  func (l dloggerFake) i16(x int16) dloggerFake { return l }
   236  
   237  //go:nosplit
   238  func (l *dloggerImpl) i16(x int16) *dloggerImpl {
   239  	return l.i64(int64(x))
   240  }
   241  
   242  //go:nosplit
   243  func (l dloggerFake) i32(x int32) dloggerFake { return l }
   244  
   245  //go:nosplit
   246  func (l *dloggerImpl) i32(x int32) *dloggerImpl {
   247  	return l.i64(int64(x))
   248  }
   249  
//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }

// i64 appends a signed integer field, encoded as a tag byte followed
// by a zig-zag varint.
//
//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}
   259  
   260  //go:nosplit
   261  func (l dloggerFake) u(x uint) dloggerFake { return l }
   262  
   263  //go:nosplit
   264  func (l *dloggerImpl) u(x uint) *dloggerImpl {
   265  	return l.u64(uint64(x))
   266  }
   267  
   268  //go:nosplit
   269  func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }
   270  
   271  //go:nosplit
   272  func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
   273  	return l.u64(uint64(x))
   274  }
   275  
   276  //go:nosplit
   277  func (l dloggerFake) u8(x uint8) dloggerFake { return l }
   278  
   279  //go:nosplit
   280  func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
   281  	return l.u64(uint64(x))
   282  }
   283  
   284  //go:nosplit
   285  func (l dloggerFake) u16(x uint16) dloggerFake { return l }
   286  
   287  //go:nosplit
   288  func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
   289  	return l.u64(uint64(x))
   290  }
   291  
   292  //go:nosplit
   293  func (l dloggerFake) u32(x uint32) dloggerFake { return l }
   294  
   295  //go:nosplit
   296  func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
   297  	return l.u64(uint64(x))
   298  }
   299  
//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }

// u64 appends an unsigned integer field, encoded as a tag byte
// followed by a varint.
//
//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}
   309  
//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }

// hex appends an unsigned integer field that will be printed in
// hexadecimal rather than decimal.
//
//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
	l.w.byte(debugLogHex)
	l.w.uvarint(x)
	return l
}
   319  
//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }

// p appends a pointer field. x must be nil or hold a pointer-shaped
// value (chan, func, map, pointer, or unsafe.Pointer); any other kind
// throws. The pointer value itself is recorded, never dereferenced.
//
//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		// Pull the data word straight out of the interface; for
		// these kinds it is the pointer itself.
		v := efaceOf(&x)
		switch v._type.Kind_ & abi.KindMask {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
   339  
//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }

// s appends a string field. Constant strings are recorded as an
// offset into the binary so they cost a few bytes instead of a copy;
// other strings are copied into the log, truncated to
// debugLogStringLimit bytes.
//
//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		// Instead, build the byte slice header by hand.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			// Record how many bytes were dropped so the printer
			// can show "..(n more bytes)..".
			l.w.byte(debugLogStringOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
   374  
//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }

// pc appends a program counter field, which will be symbolized when
// the log is printed (see printDebugLogPC).
//
//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}
   384  
   385  //go:nosplit
   386  func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }
   387  
   388  //go:nosplit
   389  func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
   390  	l.w.byte(debugLogTraceback)
   391  	l.w.uvarint(uint64(len(x)))
   392  	for _, pc := range x {
   393  		l.w.uvarint(uint64(pc))
   394  	}
   395  	return l
   396  }
   397  
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_ sys.NotInHeap
	// write is the monotonically increasing write cursor; positions
	// are reduced modulo len(data.b) on access.
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}
   429  
// debugLogBuf is the off-heap backing store for one ring buffer.
type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}

const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record:
	// a zero-size frame followed by the tick and nano time bases.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
   443  
// ensure makes room for an n-byte write by consuming (discarding)
// records at the read head until the write would no longer lap it.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
   459  
// writeFrameAt stores size as a little-endian 16-bit framing header
// at log position pos. It reports whether size actually fit in 16
// bits; the truncated low bits are written regardless, so callers
// must treat a false return as fatal (see end).
//
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
	return size <= 0xFFFF
}
   466  
// writeSync emits a sync record (a zero-size frame followed by two
// 64-bit little-endian values) establishing tick/nano as the new time
// base for subsequent record headers.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	// A size of 0 marks a sync record.
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	// Commit immediately; sync records are complete as written.
	l.r.end = l.write
}
   477  
   478  //go:nosplit
   479  func (l *debugLogWriter) writeUint64LE(x uint64) {
   480  	var b [8]byte
   481  	byteorder.LEPutUint64(b[:], x)
   482  	l.bytes(b[:])
   483  }
   484  
// byte appends a single byte to the ring buffer, evicting old records
// if necessary.
//
//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}
   492  
// bytes appends x to the ring buffer, splitting the copy where it
// wraps past the end of the backing array.
//
//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		// copy stops at the end of the array; loop to finish the
		// remainder at the front after wrapping.
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}
   504  
   505  //go:nosplit
   506  func (l *debugLogWriter) varint(x int64) {
   507  	var u uint64
   508  	if x < 0 {
   509  		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
   510  	} else {
   511  		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
   512  	}
   513  	l.uvarint(u)
   514  }
   515  
// uvarint writes u in base-128 varint encoding: 7 bits per byte,
// least-significant group first, high bit set on all but the last
// byte. Uses l.buf as scratch to keep stack usage down (nosplit).
//
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}
   528  
// A debugLogReader decodes records from a debugLogBuf. The writer
// keeps one as its eviction cursor; printDebugLogImpl copies it to
// walk the surviving records.
type debugLogReader struct {
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}
   539  
// skip advances begin past one record and returns that record's size,
// or ^uint64(0) if the remaining data is too short to hold a complete
// record. Sync records update the reader's time base as they are
// skipped.
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet. Absorb its time base before skipping it.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
   559  
   560  //go:nosplit
   561  func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
   562  	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
   563  		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
   564  }
   565  
   566  //go:nosplit
   567  func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
   568  	var b [8]byte
   569  	for i := range b {
   570  		b[i] = r.data.b[pos%uint64(len(r.data.b))]
   571  		pos++
   572  	}
   573  	return byteorder.LEUint64(b[:])
   574  }
   575  
// peek returns the absolute tick of the next record without consuming
// it, or ^uint64(0) if no complete record remains. It does consume
// any leading sync records, updating the reader's time base.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	// Decode the first uvarint of the record header (the tick delta)
	// in place, without moving r.begin.
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	// The varint ran past the record: corrupt frame.
	if pos > r.begin+size {
		return ^uint64(0)
	}
	return r.tick + u
}
   615  
// header consumes a record's framing and header fields, returning the
// record's end position, its absolute tick and nano timestamps, and
// the P it was logged on (-1 if none). The caller then reads fields
// until begin reaches end.
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p. Tick and nano are deltas from the
	// reader's current time base.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}
   630  
// uvarint decodes a base-128 varint at begin and advances begin past
// it. Inverse of debugLogWriter.uvarint.
func (r *debugLogReader) uvarint() uint64 {
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[r.begin%uint64(len(r.data.b))]
		r.begin++
		// Low 7 bits are payload; high bit means more bytes follow.
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	return u
}
   643  
   644  func (r *debugLogReader) varint() int64 {
   645  	u := r.uvarint()
   646  	var v int64
   647  	if u&1 == 0 {
   648  		v = int64(u >> 1)
   649  	} else {
   650  		v = ^int64(u >> 1)
   651  	}
   652  	return v
   653  }
   654  
// printVal decodes and prints a single field at begin, advancing
// begin past it. It reports whether decoding can continue; false
// means the field's type tag was unrecognized and the record should
// be abandoned.
func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		// Unknown tag: we can't know the field's length, so we
		// can't resynchronize within this record.
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			// Length runs past the record: corrupt; skip the rest.
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// Emit the bytes in up to two chunks (the string may wrap
		// around the end of the ring buffer).
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// Stored as length + offset from etext; reconstruct the
		// string pointing directly into the binary's data.
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogStringOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
   731  
   732  // printDebugLog prints the debug log.
   733  func printDebugLog() {
   734  	if dlogEnabled {
   735  		printDebugLogImpl()
   736  	}
   737  }
   738  
// printDebugLogImpl prints the records of every per-M debug log,
// merged into a single stream ordered by tick. It is called on the
// fatal panic path, so it avoids panicking or throwing itself and
// allocates its scratch state with sysAllocOS.
func printDebugLogImpl() {
	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool   // haven't printed this log's banner yet
		lost     uint64 // bytes evicted before the first surviving record
		nextTick uint64 // tick of this log's next record, ^0 if exhausted
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	// View the raw allocation as a slice of n readStates.
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records: repeatedly pick the log with the earliest next
	// record (merge by tick) until all logs are exhausted.
	for {
		// Find the next record.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		// Temporarily clamp s.end to this record's end so printVal
		// stops at the record boundary; restore afterward.
		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		// Format seconds.nanoseconds without allocating.
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
   852  
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// pc is past the function entry (or unresolvable), so back
		// up one byte to land inside the call instruction.
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		// Print name, offset from entry, and file:line.
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}
   873  

View as plain text