Source file src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.

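// For illustration only (a hedged sketch, not part of the runtime): a
// function like f below is eligible for open-coded defers because its
// defer does not appear in a loop, while g falls back to defer records,
// since its number of defers is not known statically.
//
//	func f(mu *sync.Mutex) { // open-coded: one fixed defer site
//		mu.Lock()
//		defer mu.Unlock()
//		// ...
//	}
//
//	func g(mus []*sync.Mutex) {
//		for _, mu := range mus {
//			mu.Lock()
//			defer mu.Unlock() // in a loop: uses a defer record
//		}
//	}
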
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? internal/runtime/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shifts by
// negative amounts. The panicdivide (again), panicoverflow, panicfloat,
// and panicmem functions are called by the signal handler when a signal
// occurs indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The goPanic{Index,Slice} functions are only used by wasm. All the other architectures
// use panic{Bounds,Extend} in assembly, which then call panicBounds{64,32,32X}.

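// For illustration only (a hedged sketch, not part of the runtime): these
// entry points are reached from compiler-generated bounds checks, never
// from explicit calls in user code. For example:
//
//	func get(s []int, i int) int {
//		return s[i] // if i is out of range, the generated check
//		            // reaches panicBounds (or goPanicIndex on wasm)
//	}
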
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3B})
}

// failures in the comparisons for s[x:y:z], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert})
}

// Implemented in assembly. Declared here to mark them as ABIInternal.
func panicBounds() // in asm_GOARCH.s files, called from generated code
func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs)

func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs
	f := findfunc(pc)
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)

	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	if xIsReg {
		e.x = regs[xVal]
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}

func panicBounds32(pc uintptr, regs *[16]int32) { // called from panicBounds on 32-bit archs
	f := findfunc(pc)
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)

	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	if xIsReg {
		if signed {
			e.x = int64(regs[xVal])
		} else {
			e.x = int64(uint32(regs[xVal]))
		}
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}

func panicBounds32X(pc uintptr, regs *[16]int32) { // called from panicExtend on 32-bit archs
	f := findfunc(pc)
	v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)

	code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))

	if code == abi.BoundsIndex {
		panicCheck1(pc, "index out of range")
	} else {
		panicCheck1(pc, "slice bounds out of range")
	}

	var e boundsError
	e.code = code
	e.signed = signed
	if xIsReg {
		// Our 4-bit register numbers are actually two 2-bit register numbers.
		lo := xVal & 3
		hi := xVal >> 2
		e.x = int64(regs[hi])<<32 + int64(uint32(regs[lo]))
	} else {
		e.x = int64(xVal)
	}
	if yIsReg {
		e.y = int(regs[yVal])
	} else {
		e.y = yVal
	}
	panic(e)
}

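// For illustration only (a hedged sketch): on a 32-bit arch a 64-bit index
// lives in a register pair, and panicBounds32X reassembles it as
//
//	x = int64(regs[hi])<<32 + int64(uint32(regs[lo]))
//
// For example, a hi word of 0x1 and a lo word of 0x80000000 give
// x = 1<<32 + 0x80000000 = 6442450944.
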
var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

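// For illustration only (a hedged sketch): a nil dereference in user code
// reaches panicmem via the signal handler and runtime.sigpanic:
//
//	var p *int
//	_ = *p // SIGSEGV -> sigpanic -> panicmem ->
//	       // "invalid memory address or nil pointer dereference"
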
var simdImmError = error(errorString("out-of-range immediate for simd intrinsic"))

func panicSimdImm() {
	panicCheck2("simd immediate error")
	panic(simdImmError)
}

// Create a new deferred function fn, which has no arguments and no results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()
}

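// For illustration only (a hedged sketch): in a function whose defers
// cannot be open-coded, the compiler lowers
//
//	defer f()
//
// to roughly a deferproc(f) call at the defer statement plus a deferreturn
// call at every function exit, which runs the recorded defers.
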
var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// with an atomic list hanging off it:
//
//		g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                             | .head
//	                             |
//	                             +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//		g._defer => drangefunc -> d2 -> d1 -> nil
//	                 | .head
//	                 |
//	                 +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}

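// For illustration only (a hedged sketch, not part of the runtime): the
// machinery above serves range-over-function loops whose bodies defer,
// such as
//
//	func walk(seq func(yield func(int) bool)) {
//		for v := range seq {
//			defer cleanup(v) // must attach to walk's frame, not to the
//		}                    // synthesized func literal for the loop body
//	}
//
// where cleanup is any ordinary function.
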
// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}

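// For illustration only (a hedged sketch): Goexit runs the goroutine's
// deferred calls and then stops only that goroutine.
//
//	go func() {
//		defer fmt.Println("deferred call still runs")
//		runtime.Goexit()
//		fmt.Println("never reached")
//	}()
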
// Call all Error and String methods before freezing the world.
// Used when crashing due to a panic.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		if p.link != nil && *efaceOf(&p.link.arg) == *efaceOf(&p.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as repanicked. We will skip printing it twice in a row.
			p.link.repanicked = true
			p = p.link
			continue
		}
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if p.link.repanicked {
			return
		}
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.recovered && p.repanicked {
		print(" [recovered, repanicked]")
	} else if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

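// For illustration only (a hedged sketch): a panic value that is recovered
// and then followed by a new panic prints as a chain, oldest first, roughly:
//
//	panic: boom [recovered]
//		panic: boom2
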
// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

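// For illustration only (a hedged sketch): in this little-endian base-128
// encoding, 300 = 0b1_0010_1100 is stored as the bytes 0xAC, 0x02. The
// first byte has its high bit set and contributes 0x2C<<0 = 44; the final
// byte contributes 2<<7 = 256; the decoder returns 44 + 256 = 300.
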
// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}

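// For illustration only (a hedged sketch): under the Go 1.21+ default,
//
//	defer func() {
//		r := recover() // r is a *runtime.PanicNilError, not nil
//		_ = r
//	}()
//	panic(nil)
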
var panicnil = &godebugInc{name: "panicnil"}

// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but historically, widely used packages access it using linkname.
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.gopanicFP = unsafe.Pointer(sys.GetCallerSP())

	runningPanicDefers.Add(1)

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
func gorecover() any {
	gp := getg()
	p := gp._panic
	if p == nil || p.goexit || p.recovered {
		return nil
	}

	// Check to see if the function that called recover() was
	// deferred directly from the panicking function.
	// For code like:
	//     func foo() {
	//         defer bar()
	//         panic("panic")
	//     }
	//     func bar() {
	//         recover()
	//     }
	// Normally the stack would look like this:
	//     foo
	//     runtime.gopanic
	//     bar
	//     runtime.gorecover
	//
	// However, if the function we deferred requires a wrapper
	// of some sort, we need to ignore the wrapper. In that case,
	// the stack looks like:
	//     foo
	//     runtime.gopanic
	//     wrapper
	//     bar
	//     runtime.gorecover
	// And we should also successfully recover.
	//
	// Finally, in the weird case "defer recover()", the stack looks like:
	//     foo
	//     runtime.gopanic
	//     wrapper
	//     runtime.gorecover
	// And we should not recover in that case.
	//
	// So our criterion is that there must be exactly one non-wrapper
	// frame between gopanic and gorecover.
	//
	// We don't recover this:
	//     defer func() { func() { recover() }() }()
	// because there are 2 non-wrapper frames.
	//
	// We don't recover this:
	//     defer recover()
	// because there are 0 non-wrapper frames.
	canRecover := false
	systemstack(func() {
		var u unwinder
		u.init(gp, 0)
		u.next() // skip systemstack_switch
		u.next() // skip gorecover
		nonWrapperFrames := 0
	loop:
		for ; u.valid(); u.next() {
			for iu, f := newInlineUnwinder(u.frame.fn, u.symPC()); f.valid(); f = iu.next(f) {
				sf := iu.srcFunc(f)
				switch sf.funcID {
				case abi.FuncIDWrapper:
					continue
				case abi.FuncID_gopanic:
					if u.frame.fp == uintptr(p.gopanicFP) && nonWrapperFrames > 0 {
						canRecover = true
					}
					break loop
				default:
					nonWrapperFrames++
					if nonWrapperFrames > 1 {
						break loop
					}
				}
			}
		}
	})
	if !canRecover {
		return nil
	}
	p.recovered = true
	return p.arg
}

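// For illustration only (a hedged sketch), applying the criterion above:
//
//	defer func() { recover() }()              // recovers: one non-wrapper frame
//	defer recover()                           // no: zero non-wrapper frames
//	defer func() { func() { recover() }() }() // no: two non-wrapper frames
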
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
	throw(s)
}

//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
	fatal(s)
}

//go:linkname cgroup_throw internal/runtime/cgroup.throw
func cgroup_throw(s string) {
	throw(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	p := getg()._panic
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
	systemstack(func() {
		printPreFatalDeferPanic(p)
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
	printunlock()
}

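// For illustration only (a hedged sketch): fatal is the path behind
// user-level faults such as concurrent map writes, which typically die
// with "fatal error: concurrent map writes":
//
//	m := map[int]int{}
//	go func() { for { m[1] = 1 } }()
//	for { m[2] = 2 }
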
// printPreFatalDeferPanic prints the panic chain
// when fatal is called while a panic is running deferred functions.
func printPreFatalDeferPanic(p *_panic) {
	// Don't call preprintpanics, because we
	// don't want to call String/Error on the panicked values.
	// When we fatal we really want to just print and exit,
	// no more executing user Go code.
	for x := p; x != nil; x = x.link {
		if x.link != nil && *efaceOf(&x.link.arg) == *efaceOf(&x.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as repanicked. We will skip printing it twice in a row.
			x.link.repanicked = true
		}
	}
	if p != nil {
		printpanics(p)
		// make fatal have the same indentation as non-first panics.
		print("\t")
	}
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// The linker records the f-relative address of a call to deferreturn in f's funcInfo.
	// Assuming a "normal" call to recover() inside one of f's deferred functions
	// invoked for a panic, that is the desired PC for exiting f.
	f := findfunc(pc)
	if f.deferreturn == 0 {
		throw("no deferreturn")
	}
	gotoPc := f.entry() + uintptr(f.deferreturn)

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			gotoPc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// branch directly to the deferreturn
	gp.sched.sp = sp
	gp.sched.pc = gotoPc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp, nil) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		// If this panic is the result of a synctest bubble deadlock,
		// print stacks for the goroutines in the bubble.
		var bubble *synctestBubble
		if de, ok := msgs.arg.(synctestDeadlockError); ok {
			bubble = de.bubble
		}

		docrash = dopanic_m(gp, pc, sp, bubble)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have a P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
// If bubble is non-nil, print the stacks for goroutines in this group as well.
func dopanic_m(gp *g, pc, sp uintptr, bubble *synctestBubble) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers {
			if all {
				didothers = true
				tracebackothers(gp)
			} else if bubble != nil {
				// This panic is caused by a synctest bubble deadlock.
				// Print stacks for goroutines in the deadlocked bubble.
				tracebacksomeothers(gp, func(other *g) bool {
					return bubble == other.bubble
				})
			}
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}
