Source file src/runtime/panic.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"internal/stringslite"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
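// For illustration, a sketch of the two shapes described above (user-level
// code; the function names are invented):
//
//	func openCoded(f *os.File) error {
//		defer f.Close() // not in a loop: open-coded into the frame
//		return use(f)
//	}
//
//	func chained(files []*os.File) {
//		for _, f := range files {
//			defer f.Close() // in a loop: a _defer record per iteration
//		}
//	}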
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? internal/runtime/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shifts by
// negative amounts.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}
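// For illustration, a sketch of user code that the compiler lowers to the
// index checks above, and roughly the message it produces:
//
//	s := []int{1, 2, 3}
//	i := 5
//	_ = s[i] // bounds check fails; the generated code calls panicIndex(5, 3)
//
// which panics with a runtime.Error printed as:
//
//	panic: runtime error: index out of range [5] with length 3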
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(sys.GetCallerPC(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}
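// For illustration, sketches of expressions that reach the two entry points
// above (user-level code):
//
//	n := 0
//	_ = 1 / n // reaches panicdivide, directly or via the signal handler:
//	          // "integer divide by zero"
//
//	k := -1
//	_ = 1 << k // the compiler's shift check calls panicshift:
//	           // "negative shift amount"
//
// Both panic with values satisfying runtime.Error and are recoverable in
// user code; the same conditions inside the runtime become throws via
// panicCheck1/panicCheck2.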
var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
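// For illustration, a sketch of a fault that arrives here through the
// signal handler and sigpanic (user-level code):
//
//	var p *int
//	defer func() {
//		if r := recover(); r != nil {
//			// r is the runtime.Error "invalid memory address or
//			// nil pointer dereference" raised by panicmem.
//		}
//	}()
//	_ = *p // SIGSEGV -> sigpanic -> panicmem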
// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()
}

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}
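// For illustration, a sketch of an iterator bug that trips the RF_DONE
// state above (user-level code; badSeq is an invented name):
//
//	func badSeq(yield func(int) bool) {
//		yield(1) // returns false once the loop body executes "break"...
//		yield(2) // ...so this call continues a finished loop, and the
//		         // rewritten loop body panics with rangeDoneError
//	}
//
//	for range badSeq {
//		break
//	}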
// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but with an atomic list hanging off:
//
//	g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                        | .head
//	                        |
//	                        +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//	g._defer => drangefunc -> d2 -> d1 -> nil
//	            | .head
//	            |
//	            +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = sys.GetCallerPC()
	// We must not be preempted between calling GetCallerSP and
	// storing it to d.sp because GetCallerSP's result is a
	// uintptr stack pointer.
	d.sp = sys.GetCallerSP()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}
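// For illustration, a sketch of the shape that needs this token
// (user-level code; seq and cleanup are invented names):
//
//	func f(seq func(func(int) bool)) {
//		for x := range seq {
//			defer cleanup(x) // runs when f returns, not when the
//			                 // loop body's func literal returns
//		}
//	}
//
// The loop body is compiled as a separate func literal, so the compiler
// rewrites f to call deferrangefunc once, and rewrites the body to call
// deferprocat with the returned token, attaching each defer to f's frame.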
// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
}
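// For illustration, a standalone sketch of the lock-free push that
// deferprocat performs, with a sentinel playing the role of badDefer()
// (generic user-level code; the names are invented):
//
//	type node struct{ next *node }
//
//	var closed = new(node) // sentinel meaning "list is poisoned"
//
//	func push(head *atomic.Pointer[node], n *node) {
//		for {
//			old := head.Load()
//			if old == closed {
//				panic("push after list was closed")
//			}
//			n.next = old
//			if head.CompareAndSwap(old, n) {
//				return // n is the new head
//			}
//			// lost a race with a concurrent push or close; retry
//		}
//	}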
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = sys.GetCallerSP()
	d.pc = sys.GetCallerPC()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}
// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of the local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}
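// For illustration, a standalone sketch of the two-level caching strategy
// that newdefer and popDefer implement (generic user-level code; every name
// is invented, and the real pools hold *_defer and are per-P):
//
//	type thing struct{ /* ... */ }
//
//	type pools struct {
//		mu      sync.Mutex
//		central []*thing // shared; guarded by mu
//	}
//
//	// get refills the local cache to half capacity from the central
//	// pool before falling back to allocation, mirroring newdefer.
//	func (p *pools) get(local *[]*thing) *thing {
//		if len(*local) == 0 {
//			p.mu.Lock()
//			for len(*local) < cap(*local)/2 && len(p.central) > 0 {
//				n := len(p.central)
//				*local = append(*local, p.central[n-1])
//				p.central = p.central[:n-1]
//			}
//			p.mu.Unlock()
//		}
//		if n := len(*local); n > 0 {
//			t := (*local)[n-1]
//			*local = (*local)[:n-1]
//			return t
//		}
//		return new(thing)
//	}
//
// Moving items in half-batches keeps the lock on the central pool off the
// common path, just as popDefer only takes sched.deferlock once the local
// cache fills up.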
// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
//
// It crashes if called from a thread not created by the Go runtime.
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}
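// For illustration, a sketch of Goexit's interaction with defers and
// recover (user-level code):
//
//	go func() {
//		defer func() {
//			fmt.Println("recover() =", recover())
//			// prints: recover() = <nil>
//			// Goexit is not a panic, so recover returns nil and
//			// does not stop the goroutine from exiting.
//		}()
//		runtime.Goexit()
//		fmt.Println("unreachable")
//	}()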
// Call all Error and String methods before freezing the world.
// Used when crashing because of a panic.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		if p.link != nil && *efaceOf(&p.link.arg) == *efaceOf(&p.arg) {
			// This panic contains the same value as the next one in the chain.
			// Mark it as repanicked. We will skip printing it twice in a row.
			p.link.repanicked = true
			p = p.link
			continue
		}
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if p.link.repanicked {
			return
		}
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.repanicked {
		print(" [recovered, repanicked]")
	} else if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
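// For illustration, a worked decoding of the little-endian base-128 format
// read above: the bytes 0x96 0x01 decode as
//
//	0x96 = 0b1001_0110: continuation bit set, low 7 bits = 0x16 (22)
//	0x01 = 0b0000_0001: no continuation, value 1 shifted left by 7 = 128
//
//	r = 22 + 128 = 150
//
// so readvarintUnsafe returns 150 and a pointer two bytes past fd.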
// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}

var panicnil = &godebugInc{name: "panicnil"}
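// For illustration, a sketch of the Go 1.21 behavior change (user-level
// code):
//
//	defer func() {
//		r := recover()
//		// Go >= 1.21: r is a *runtime.PanicNilError.
//		// Go < 1.21, or GODEBUG=panicnil=1: r is nil, making the
//		// panic indistinguishable from no panic at all.
//		fmt.Printf("%T\n", r)
//	}()
//	panic(nil)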
// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - go.undefinedlabs.com/scopeagent
//   - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// Ran out of deferred calls - old-school panic now.
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = sys.GetCallerPC()
	p.startSP = unsafe.Pointer(sys.GetCallerSP())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
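// For illustration, a sketch of why the argp comparison above makes
// recover position-sensitive (user-level code):
//
//	defer func() {
//		recover() // recovers: called directly by the deferred function,
//		          // so its caller's argp matches p.argp
//	}()
//
//	defer func() {
//		func() {
//			recover() // returns nil: called one frame too deep,
//			          // so the argp check fails
//		}()
//	}()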
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

//go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string) {
	fatal(s)
}

//go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string) {
	fatal(s)
}

//go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string) {
	fatal(s)
}

//go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string) {
	fatal(s)
}

//go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string) {
	throw(s)
}

//go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string) {
	fatal(s)
}

//go:linkname cgroup_throw internal/runtime/cgroup.throw
func cgroup_throw(s string) {
	throw(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	printlock() // Prevent multiple interleaved fatal reports. See issue 69447.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
	printunlock()
}
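// For illustration, a sketch of a user-code bug that ends in fatal rather
// than in a recoverable panic (user-level code):
//
//	m := map[int]int{}
//	go func() {
//		for {
//			m[0] = 1
//		}
//	}()
//	for {
//		m[1] = 2 // eventually: "fatal error: concurrent map writes"
//	}
//
// Unlike a panic, this fatal error cannot be caught by recover; the process
// prints the trace and exits.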
// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// The linker records the f-relative address of a call to deferreturn in f's funcInfo.
	// Assuming a "normal" call to recover() inside one of f's deferred functions
	// invoked for a panic, that is the desired PC for exiting f.
	f := findfunc(pc)
	if f.deferreturn == 0 {
		throw("no deferreturn")
	}
	gotoPc := f.entry() + uintptr(f.deferreturn)

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			gotoPc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Branch directly to the deferreturn.
	gp.sched.sp = sp
	gp.sched.pc = gotoPc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// On x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller.
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// On arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gogo(&gp.sched)
}
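// For illustration, a sketch of the behavior recovery implements, as
// observed from user code: after recover, execution continues as though
// the panicking function had returned normally, so a named result set in
// the deferred function is what the caller sees:
//
//	func safeDiv(a, b int) (q int, err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("recovered: %v", r)
//			}
//		}()
//		return a / b, nil
//	}
//
// With b == 0, the division panics; the deferred function recovers and
// sets err, recovery resumes execution at safeDiv's deferreturn point,
// and the caller sees (0, err) as if safeDiv had returned normally.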
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp, nil) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := sys.GetCallerPC()
	sp := sys.GetCallerSP()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		// If this panic is the result of a synctest bubble deadlock,
		// print stacks for the goroutines in the bubble.
		var bubble *synctestBubble
		if de, ok := msgs.arg.(synctestDeadlockError); ok {
			bubble = de.bubble
		}

		docrash = dopanic_m(gp, pc, sp, bubble)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
//
// If bubble is non-nil, print the stacks for goroutines in this group as well.
func dopanic_m(gp *g, pc, sp uintptr, bubble *synctestBubble) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers {
			if all {
				didothers = true
				tracebackothers(gp)
			} else if bubble != nil {
				// This panic is caused by a synctest bubble deadlock.
				// Print stacks for goroutines in the deadlocked bubble.
				tracebacksomeothers(gp, func(other *g) bool {
					return bubble == other.bubble
				})
			}
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}
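// For illustration, a summary of how the gotraceback() level consulted in
// dopanic_m is driven by the GOTRACEBACK environment variable:
//
//	GOTRACEBACK=none    omit goroutine stack traces entirely
//	GOTRACEBACK=single  trace only the crashing goroutine (the default)
//	GOTRACEBACK=all     trace all user goroutines ("all" above becomes true)
//	GOTRACEBACK=system  also include runtime frames and system goroutines
//	GOTRACEBACK=crash   like system, plus dopanic_m reports docrash=true,
//	                    so the process aborts (e.g. to produce a core dump)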