Source file src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
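//
// For example, a function whose defer statements are not inside loops is
// eligible for open-coded defers. A hedged illustration (not code from this
// file; unlock and work are placeholders):
//
//	func f() {
//		defer unlock()
//		work()
//	}
//
// whereas a defer inside a loop forces the defer-record mechanism, since the
// number of pending defers is not known at compile time:
//
//	func g(n int) {
//		for i := 0; i < n; i++ {
//			defer cleanup(i) // defer in a loop: uses a defer record
//		}
//	}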

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out-of-bounds index expressions,
// out-of-bounds slice expressions, division by zero, and shifts by a
// negative amount. The panicdivide (again), panicoverflow, panicfloat,
// and panicmem functions are called by the signal handler when a signal
// occurs indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out-of-bounds slice
// or array reference or a negative shift, if we see those functions called
// from the runtime package we turn the panic into a throw. That will dump
// the entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

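// For instance, an index expression s[i] conceptually lowers to a bounds
// check that calls one of these entry points on failure. A simplified
// sketch of the generated code, not literal compiler output:
//
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // tail calls goPanicIndex
//	}
//	e := s[i]
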
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and no results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
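
// As a hedged sketch (not literal compiler output), a statement like
//
//	defer f()
//
// in a function that cannot use open-coded defers lowers to roughly:
//
//	deferproc(f)  // returns 0 normally
//	// ... rest of the function body ...
//	deferreturn() // run pending defers at exit
//	return
//
// where a nonzero deferproc return (arranged by recovery, below) jumps
// straight to the function's return epilogue.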

var rangeExitError = error(errorString("range function continued iteration after exit"))

//go:noinline
func panicrangeexit() {
	panic(rangeExitError)
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but with an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then add them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}
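
// As a hedged sketch of how the pieces fit together (a rough outline, not
// the compiler's exact rewrite; seq, fn, and V are placeholders), a
// range-over-func loop whose body defers is rewritten roughly like this:
//
//	token := deferrangefunc() // in the enclosing function's frame
//	seq(func(v V) bool {
//		deferprocat(fn, token) // "defer fn()" inside the loop body
//		// ... rest of the loop body ...
//		return true
//	})
//
// so the deferred call attaches to the enclosing frame rather than to the
// synthesized loop-body closure. See ../cmd/compile/internal/rangefunc/rewrite.go
// for the real rewrite.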

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using the per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of the local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}
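
// In other words, every frame that uses defer ends with the pairing
// "CALL deferreturn; RET" (see the TODO in recovery below). A hedged
// sketch of the resulting epilogue, not literal assembly:
//
//	CALL runtime.deferreturn
//	RET
//
// recovery relies on this pairing: it resumes execution at the deferreturn
// call recorded in d.pc, so the frame finishes running its remaining defers
// and then returns normally.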

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
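
// A user-level illustration (hedged; not part of the runtime, and assuming
// the fmt and runtime packages are imported): the deferred call below still
// runs, and any recover inside it would return nil because Goexit is not a
// panic:
//
//	go func() {
//		defer fmt.Println("runs before the goroutine exits")
//		runtime.Goexit()
//		fmt.Println("never reached")
//	}()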

// Call all Error and String methods before freezing the world.
// Used when crashing because of a panic.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
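
// For example, the value 300 (binary 100101100) is encoded low-order group
// first, with the high bit of each byte as a continuation flag: the bytes
// 0xAC, 0x02 decode as 0x2C + (0x02 << 7) = 44 + 256 = 300.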

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
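
// A hedged user-level illustration: code that needs to distinguish panic(nil)
// can type-switch on the recovered value:
//
//	defer func() {
//		switch recover().(type) {
//		case nil:
//			// no panic (or GODEBUG=panicnil=1 restored the old behavior)
//		case *runtime.PanicNilError:
//			// panic(nil) was called (the Go 1.21+ default)
//		}
//	}()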

var panicnil = &godebugInc{name: "panicnil"}

// The implementation of the predeclared function panic.
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = getcallerpc()
	p.startSP = unsafe.Pointer(getcallersp())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			// TODO(mdempsky): Instead of having each deferproc call have
			// its own "deferreturn(); return" sequence, we should just make
			// them reuse the one we emit for open-coded defers.
			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
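
// Consequently, recover stops a panic only when called directly by a
// function that was deferred in the panicking frame. A hedged illustration
// (handle is a placeholder):
//
//	defer func() {
//		if r := recover(); r != nil { // argp matches p.argp: panic stops
//			handle(r)
//		}
//	}()
//
// A recover buried inside a helper that the deferred function calls returns
// nil instead, because the helper's argument pointer does not match p.argp.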

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})

	fatalthrow(throwTypeUser)
}
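
// For example (hedged, user-level code): unsynchronized map writes are
// reported through fatal and cannot be recovered:
//
//	m := make(map[int]int)
//	go func() { for { m[0] = 0 } }()
//	for { m[1] = 1 } // typically dies with: fatal error: concurrent map writes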

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			pc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then we assume the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}