Source file src/runtime/signal_unix.go

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build unix
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goexperiment"
    12  	"internal/runtime/atomic"
    13  	"internal/runtime/sys"
    14  	"unsafe"
    15  )
    16  
// sigTabT is the type of an entry in the global sigtable array.
// sigtable is inherently system dependent, and appears in OS-specific files,
// but sigTabT is the same for all Unixy systems.
// The sigtable array is indexed by a system signal number to get the flags
// and printable name of each signal.
type sigTabT struct {
	flags int32  // bitmask of _Sig* disposition flags (_SigNotify, _SigPanic, _SigThrow, ...)
	name  string // printable signal name used in fatal-signal output
}
    26  
// os_sigpipe is called by the os package (via linkname) when a write
// returns EPIPE, so that the runtime can apply Go's SIGPIPE semantics.
//
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
	// Run sigpipe on the system stack: it may terminate the process
	// via dieFromSignal, which must not run on an ordinary goroutine stack.
	systemstack(sigpipe)
}
    31  
    32  func signame(sig uint32) string {
    33  	if sig >= uint32(len(sigtable)) {
    34  		return ""
    35  	}
    36  	return sigtable[sig].name
    37  }
    38  
const (
	_SIG_DFL uintptr = 0 // default signal disposition
	_SIG_IGN uintptr = 1 // signal is ignored
)

// sigPreempt is the signal used for non-cooperative preemption.
//
// There's no good way to choose this signal, but there are some
// heuristics:
//
// 1. It should be a signal that's passed-through by debuggers by
// default. On Linux, this is SIGALRM, SIGURG, SIGCHLD, SIGIO,
// SIGVTALRM, SIGPROF, and SIGWINCH, plus some glibc-internal signals.
//
// 2. It shouldn't be used internally by libc in mixed Go/C binaries
// because libc may assume it's the only thing that can handle these
// signals. For example SIGCANCEL or SIGSETXID.
//
// 3. It should be a signal that can happen spuriously without
// consequences. For example, SIGALRM is a bad choice because the
// signal handler can't tell if it was caused by the real process
// alarm or not (arguably this means the signal is broken, but I
// digress). SIGUSR1 and SIGUSR2 are also bad because those are often
// used in meaningful ways by applications.
//
// 4. We need to deal with platforms without real-time signals (like
// macOS), so those are out.
//
// We use SIGURG because it meets all of these criteria, is extremely
// unlikely to be used by an application for its "real" meaning (both
// because out-of-band data is basically unused and because SIGURG
// doesn't report which socket has the condition, making it pretty
// useless), and even if it is, the application has to be ready for
// spurious SIGURG. SIGIO wouldn't be a bad choice either, but is more
// likely to be used for real.
const sigPreempt = _SIGURG

// Stores the signal handlers registered before Go installed its own.
// These signal handlers will be invoked in cases where Go doesn't want to
// handle a particular signal (e.g., signal occurred on a non-Go thread).
// See sigfwdgo for more information on when the signals are forwarded.
//
// This is read by the signal handler; accesses should use
// atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr

// handlingSig is indexed by signal number and is non-zero if we are
// currently handling the signal. Or, to put it another way, whether
// the signal handler is currently set to the Go signal handler or not.
// This is uint32 rather than bool so that we can use atomic instructions.
var handlingSig [_NSIG]uint32

// channels for synchronizing signal mask updates with the signal mask
// thread (started by ensureSigM; see sigenable/sigdisable).
var (
	disableSigChan  chan uint32   // signal numbers to remove from the mask
	enableSigChan   chan uint32   // signal numbers to add to the mask
	maskUpdatedChan chan struct{} // signaled after each mask update completes
)
    98  
// init sanity-checks that the OS-specific sigtable covers every
// signal number on this system; a mismatch is a build/port bug.
func init() {
	// _NSIG is the number of signals on this operating system.
	// sigtable should describe what to do for all the possible signals.
	if len(sigtable) != _NSIG {
		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
		throw("bad sigtable len")
	}
}
   107  
   108  var signalsOK bool
   109  
// Initialize signals.
// Called by libpreinit so runtime may not be initialized.
//
//go:nosplit
//go:nowritebarrierrec
func initsig(preinit bool) {
	if !preinit {
		// It's now OK for signal handlers to run.
		signalsOK = true
	}

	// For c-archive/c-shared this is called by libpreinit with
	// preinit == true.
	if (isarchive || islibrary) && !preinit {
		return
	}

	for i := uint32(0); i < _NSIG; i++ {
		t := &sigtable[i]
		// Skip signals the runtime leaves entirely alone.
		if t.flags == 0 || t.flags&_SigDefault != 0 {
			continue
		}

		// We don't need to use atomic operations here because
		// there shouldn't be any other goroutines running yet.
		fwdSig[i] = getsig(i)

		if !sigInstallGoHandler(i) {
			// Even if we are not installing a signal handler,
			// set SA_ONSTACK if necessary.
			if fwdSig[i] != _SIG_DFL && fwdSig[i] != _SIG_IGN {
				setsigstack(i)
			} else if fwdSig[i] == _SIG_IGN {
				// The signal was inherited as ignored.
				sigInitIgnored(i)
			}
			continue
		}

		// Take ownership of this signal and install the Go handler.
		handlingSig[i] = 1
		setsig(i, abi.FuncPCABIInternal(sighandler))
	}
}
   152  
// sigInstallGoHandler reports whether the Go signal handler should be
// installed for sig. It is consulted by initsig, by sigdisable when
// deciding whether to restore the forwarded handler, and by
// setProcessCPUProfilerTimer for _SIGPROF.
//
//go:nosplit
//go:nowritebarrierrec
func sigInstallGoHandler(sig uint32) bool {
	// For some signals, we respect an inherited SIG_IGN handler
	// rather than insist on installing our own default handler.
	// Even these signals can be fetched using the os/signal package.
	switch sig {
	case _SIGHUP, _SIGINT:
		if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
			return false
		}
	}

	if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries.
		return true
	}

	t := &sigtable[sig]
	if t.flags&_SigSetStack != 0 {
		return false
	}

	// When built using c-archive or c-shared, only install signal
	// handlers for synchronous signals and SIGPIPE and sigPreempt.
	if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE && sig != sigPreempt {
		return false
	}

	return true
}
   186  
// sigenable enables the Go signal handler to catch the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		// Ask the signal mask thread to unblock sig, and wait for it.
		enableSigChan <- sig
		<-maskUpdatedChan
		// The CAS makes the save-and-replace of the old handler happen
		// at most once, even across repeated enable calls.
		if atomic.Cas(&handlingSig[sig], 0, 1) {
			atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
			setsig(sig, abi.FuncPCABIInternal(sighandler))
		}
	}
}
   211  
// sigdisable disables the Go signal handler for the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		ensureSigM()
		// Ask the signal mask thread to block sig again, and wait for it.
		disableSigChan <- sig
		<-maskUpdatedChan

		// If initsig does not install a signal handler for a
		// signal, then to go back to the state before Notify
		// we should remove the one we installed.
		if !sigInstallGoHandler(sig) {
			atomic.Store(&handlingSig[sig], 0)
			setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
		}
	}
}
   240  
// sigignore ignores the signal sig.
// It is only called while holding the os/signal.handlers lock,
// via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32) {
	if sig >= uint32(len(sigtable)) {
		return
	}

	// SIGPROF is handled specially for profiling.
	if sig == _SIGPROF {
		return
	}

	t := &sigtable[sig]
	if t.flags&_SigNotify != 0 {
		// Mark the signal as no longer handled by Go and set SIG_IGN
		// at the OS level.
		atomic.Store(&handlingSig[sig], 0)
		setsig(sig, _SIG_IGN)
	}
}
   260  
// clearSignalHandlers clears all signal handlers that are not ignored
// back to the default. This is called by the child after a fork, so that
// we can enable the signal mask for the exec without worrying about
// running a signal handler in the child.
//
//go:nosplit
//go:nowritebarrierrec
func clearSignalHandlers() {
	for i := uint32(0); i < _NSIG; i++ {
		// Only signals currently routed to the Go handler are reset.
		if atomic.Load(&handlingSig[i]) != 0 {
			setsig(i, _SIG_DFL)
		}
	}
}
   275  
// setProcessCPUProfilerTimer is called when the profiling timer changes.
// It is called with prof.signalLock held. hz is the new timer, and is 0 if
// profiling is being disabled. Enable or disable the signal as
// required for -buildmode=c-archive.
func setProcessCPUProfilerTimer(hz int32) {
	if hz != 0 {
		// Enable the Go signal handler if not enabled.
		if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
			h := getsig(_SIGPROF)
			// If no signal handler was installed before, then we record
			// _SIG_IGN here. When we turn off profiling (below) we'll start
			// ignoring SIGPROF signals. We do this, rather than change
			// to SIG_DFL, because there may be a pending SIGPROF
			// signal that has not yet been delivered to some other thread.
			// If we change to SIG_DFL when turning off profiling, the
			// program will crash when that SIGPROF is delivered. We assume
			// that programs that use profiling don't want to crash on a
			// stray SIGPROF. See issue 19320.
			// We do the change here instead of when turning off profiling,
			// because there we may race with a signal handler running
			// concurrently, in particular, sigfwdgo may observe _SIG_DFL and
			// die. See issue 43828.
			if h == _SIG_DFL {
				h = _SIG_IGN
			}
			atomic.Storeuintptr(&fwdSig[_SIGPROF], h)
			setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
		}

		// Arm the process-wide profiling interval timer at hz ticks/sec.
		var it itimerval
		it.it_interval.tv_sec = 0
		it.it_interval.set_usec(1000000 / hz)
		it.it_value = it.it_interval
		setitimer(_ITIMER_PROF, &it, nil)
	} else {
		// Disarm the timer.
		setitimer(_ITIMER_PROF, &itimerval{}, nil)

		// If the Go signal handler should be disabled by default,
		// switch back to the signal handler that was installed
		// when we enabled profiling. We don't try to handle the case
		// of a program that changes the SIGPROF handler while Go
		// profiling is enabled.
		if !sigInstallGoHandler(_SIGPROF) {
			if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
				h := atomic.Loaduintptr(&fwdSig[_SIGPROF])
				setsig(_SIGPROF, h)
			}
		}
	}
}
   326  
// setThreadCPUProfilerHz makes any thread-specific changes required to
// implement profiling at a rate of hz.
// No changes required on Unix systems when using setitimer.
func setThreadCPUProfilerHz(hz int32) {
	// Just record the rate on the current M.
	getg().m.profilehz = hz
}
   333  
// sigpipe implements Go's SIGPIPE policy for writes that fail with EPIPE
// (reached via os_sigpipe). If the signal is being ignored, or sigsend
// delivers it to a channel registered via os/signal, nothing more happens;
// otherwise the process dies as if by the default SIGPIPE disposition.
func sigpipe() {
	if signal_ignored(_SIGPIPE) || sigsend(_SIGPIPE) {
		return
	}
	dieFromSignal(_SIGPIPE)
}
   340  
// doSigPreempt handles a preemption signal on gp.
func doSigPreempt(gp *g, ctxt *sigctxt) {
	// Check if this G wants to be preempted and is safe to
	// preempt.
	if wantAsyncPreempt(gp) {
		if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
			// Adjust the PC and inject a call to asyncPreempt.
			ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
		}
	}

	// Acknowledge the preemption. This is done even when the G was
	// not preempted, so preemptM's pending flag is always cleared.
	gp.m.preemptGen.Add(1)
	gp.m.signalPending.Store(0)

	if GOOS == "darwin" || GOOS == "ios" {
		// Balance the Add(1) performed by preemptM on Darwin.
		pendingPreemptSignals.Add(-1)
	}
}
   360  
   361  const preemptMSupported = true
   362  
// preemptM sends a preemption request to mp. This request may be
// handled asynchronously and may be coalesced with other requests to
// the M. When the request is received, if the running G or P are
// marked for preemption and the goroutine is at an asynchronous
// safe-point, it will preempt the goroutine. It always atomically
// increments mp.preemptGen after handling a preemption request.
func preemptM(mp *m) {
	// On Darwin, don't try to preempt threads during exec.
	// Issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		execLock.rlock()
	}

	if mp.signalPending.CompareAndSwap(0, 1) {
		if GOOS == "darwin" || GOOS == "ios" {
			// Track in-flight preemption signals; decremented in
			// doSigPreempt/sigtrampgo when the signal is consumed.
			pendingPreemptSignals.Add(1)
		}

		// If multiple threads are preempting the same M, it may send many
		// signals to the same M such that it hardly make progress, causing
		// live-lock problem. Apparently this could happen on darwin. See
		// issue #37741.
		// Only send a signal if there isn't already one pending.
		signalM(mp, sigPreempt)
	}

	if GOOS == "darwin" || GOOS == "ios" {
		execLock.runlock()
	}
}
   393  
// sigFetchG fetches the value of G safely when running in a signal handler.
// On some architectures, the g value may be clobbered when running in a VDSO.
// See issue #32912.
//
//go:nosplit
func sigFetchG(c *sigctxt) *g {
	switch GOARCH {
	case "arm", "arm64", "loong64", "ppc64", "ppc64le", "riscv64", "s390x":
		if !iscgo && inVDSOPage(c.sigpc()) {
			// When using cgo, we save the g on TLS and load it from there
			// in sigtramp. Just use that.
			// Otherwise, before making a VDSO call we save the g to the
			// bottom of the signal stack. Fetch from there.
			// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
			// work.
			sp := sys.GetCallerSP()
			s := spanOf(sp)
			// Only trust the saved g if sp is inside a manually-managed
			// (stack) span; otherwise report no g.
			if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
				gp := *(**g)(unsafe.Pointer(s.base()))
				return gp
			}
			return nil
		}
	}
	return getg()
}
   420  
// sigtrampgo is called from the signal handler function, sigtramp,
// written in assembly code.
// This is called by the signal handler, and the world may be stopped.
//
// It must be nosplit because getg() is still the G that was running
// (if any) when the signal was delivered, but it's (usually) called
// on the gsignal stack. Until this switches the G to gsignal, the
// stack bounds check won't work.
//
//go:nosplit
//go:nowritebarrierrec
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	// If a non-Go handler should get this signal, forward it and stop.
	if sigfwdgo(sig, info, ctx) {
		return
	}
	c := &sigctxt{info, ctx}
	gp := sigFetchG(c)
	setg(gp)
	if gp == nil || (gp.m != nil && gp.m.isExtraInC) {
		// Signal arrived on a non-Go thread (or a C-created extra M).
		if sig == _SIGPROF {
			// Some platforms (Linux) have per-thread timers, which we use in
			// combination with the process-wide timer. Avoid double-counting.
			if validSIGPROF(nil, c) {
				sigprofNonGoPC(c.sigpc())
			}
			return
		}
		if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
			// This is probably a signal from preemptM sent
			// while executing Go code but received while
			// executing non-Go code.
			// We got past sigfwdgo, so we know that there is
			// no non-Go signal handler for sigPreempt.
			// The default behavior for sigPreempt is to ignore
			// the signal, so badsignal will be a no-op anyway.
			if GOOS == "darwin" || GOOS == "ios" {
				pendingPreemptSignals.Add(-1)
			}
			return
		}
		c.fixsigcode(sig)
		// Set g to nil here and badsignal will use g0 by needm.
		// TODO: reuse the current m here by using the gsignal and adjustSignalStack,
		// since the current g maybe a normal goroutine and actually running on the signal stack,
		// it may hit stack split that is not expected here.
		if gp != nil {
			setg(nil)
		}
		badsignal(uintptr(sig), c)
		// Restore g
		if gp != nil {
			setg(gp)
		}
		return
	}

	// Switch to the signal goroutine so stack checks work from here on.
	setg(gp.m.gsignal)

	// If some non-Go code called sigaltstack, adjust.
	var gsignalStack gsignalStack
	setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
	if setStack {
		gp.m.gsignal.stktopsp = sys.GetCallerSP()
	}

	if gp.stackguard0 == stackFork {
		// Signals are blocked around fork; getting one here is a bug.
		signalDuringFork(sig)
	}

	c.fixsigcode(sig)
	sighandler(sig, info, ctx, gp)

	if goexperiment.RuntimeSecret && gp.secret > 0 {
		atomic.Store(&gp.m.signalSecret, 1)
	}

	// Switch back to the interrupted G and undo any sigaltstack change.
	setg(gp)
	if setStack {
		restoreGsignalStack(&gsignalStack)
	}
}
   502  
// If the signal handler receives a SIGPROF signal on a non-Go thread,
// it tries to collect a traceback into sigprofCallers.
// sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
var sigprofCallers cgoCallers
var sigprofCallersUse uint32 // accessed atomically; cleared by sigprofNonGo
   508  
// sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
// and the signal handler collected a stack trace in sigprofCallers.
// When this is called, sigprofCallersUse will be non-zero.
// g is nil, and what we can do is very limited.
//
// It is called from the signal handling functions written in assembly code that
// are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
// not verified that the SIGPROF delivery corresponds to the best available
// profiling source for this thread.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
	if prof.hz.Load() != 0 {
		c := &sigctxt{info, ctx}
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if validSIGPROF(nil, c) {
			// Find the length of the collected traceback (zero-terminated).
			n := 0
			for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
				n++
			}
			cpuprof.addNonGo(sigprofCallers[:n])
		}
	}

	// Release sigprofCallers for the next SIGPROF.
	atomic.Store(&sigprofCallersUse, 0)
}
   537  
// sigprofNonGoPC is called when a profiling signal arrived on a
// non-Go thread and we have a single PC value, not a stack trace.
// g is nil, and what we can do is very limited.
//
//go:nosplit
//go:nowritebarrierrec
func sigprofNonGoPC(pc uintptr) {
	if prof.hz.Load() != 0 {
		// Synthesize a two-frame stack: the PC plus an _ExternalCode
		// marker so the profile attributes the sample to non-Go code.
		stk := []uintptr{
			pc,
			abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
		}
		cpuprof.addNonGo(stk)
	}
}
   553  
// adjustSignalStack adjusts the current stack guard based on the
// stack pointer that is actually in use while handling a signal.
// We do this in case some non-Go code called sigaltstack.
// This reports whether the stack was adjusted, and if so stores the old
// signal stack in *gsigstack.
//
//go:nosplit
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
	// Use the address of an argument as an approximation of the
	// current stack pointer.
	sp := uintptr(unsafe.Pointer(&sig))
	if sp >= mp.gsignal.stack.lo && sp < mp.gsignal.stack.hi {
		// Already on the expected gsignal stack; nothing to do.
		return false
	}

	var st stackt
	sigaltstack(nil, &st)
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
		// Running on an alternate signal stack installed by non-Go code.
		setGsignalStack(&st, gsigStack)
		return true
	}

	if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
		// The signal was delivered on the g0 stack.
		// This can happen when linked with C code
		// using the thread sanitizer, which collects
		// signals then delivers them itself by calling
		// the signal handler directly when C code,
		// including C code called via cgo, calls a
		// TSAN-intercepted function such as malloc.
		//
		// We check this condition last as g0.stack.lo
		// may be not very accurate (see mstart).
		st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
		setSignalstackSP(&st, mp.g0.stack.lo)
		setGsignalStack(&st, gsigStack)
		return true
	}

	// sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
	// Call indirectly to avoid nosplit stack overflow on OpenBSD.
	adjustSignalStack2Indirect(sig, sp, mp, st.ss_flags&_SS_DISABLE != 0)
	return false
}
   597  
   598  var adjustSignalStack2Indirect = adjustSignalStack2
   599  
// adjustSignalStack2 reports a signal that arrived on an unexpected
// stack (neither gsignal, g0, nor a registered sigaltstack) and crashes.
// It borrows an M via needm so the error paths can run.
//
//go:nosplit
func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool) {
	setg(nil)
	needm(true)
	if ssDisable {
		noSignalStack(sig)
	} else {
		sigNotOnStack(sig, sp, mp)
	}
	dropm()
}
   611  
// crashing is the number of m's we have waited for when implementing
// GOTRACEBACK=crash when a signal is received.
var crashing atomic.Int32

// testSigtrap and testSigusr1 are used by the runtime tests. If
// non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the
// normal behavior on this signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool

// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
// Accessed atomically; toggled via ignoreSIGSYS/restoreSIGSYS.
var sigsysIgnored uint32
   624  
// ignoreSIGSYS makes the runtime drop seccomp-generated SIGSYS signals
// (see sighandler). Called by the os package via linkname.
//
//go:linkname ignoreSIGSYS os.ignoreSIGSYS
func ignoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 1)
}
   629  
// restoreSIGSYS undoes ignoreSIGSYS. Called by the os package via linkname.
//
//go:linkname restoreSIGSYS os.restoreSIGSYS
func restoreSIGSYS() {
	atomic.Store(&sigsysIgnored, 0)
}
   634  
// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
// signal stack. The parameter gp will be the value of the global g
// when the signal occurred. The sig, info, and ctxt parameters are
// from the system signal handler: they are the parameters passed when
// the SA is passed to the sigaction system call.
//
// The garbage collector may have stopped the world, so write barriers
// are not allowed.
//
//go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
	// The g executing the signal handler. This is almost always
	// mp.gsignal. See delayedSignal for an exception.
	gsignal := getg()
	mp := gsignal.m
	c := &sigctxt{info, ctxt}

	// Cgo TSAN (not the Go race detector) intercepts signals and calls the
	// signal handler at a later time. When the signal handler is called, the
	// memory may have changed, but the signal context remains old. The
	// unmatched signal context and memory makes it unsafe to unwind or inspect
	// the stack. So we ignore delayed non-fatal signals that will cause a stack
	// inspection (profiling signal and preemption signal).
	// cgo_yield is only non-nil for TSAN, and is specifically used to trigger
	// signal delivery. We use that as an indicator of delayed signals.
	// For delayed signals, the handler is called on the g0 stack (see
	// adjustSignalStack).
	delayedSignal := *cgo_yield != nil && mp != nil && gsignal.stack == mp.g0.stack

	if sig == _SIGPROF {
		// Some platforms (Linux) have per-thread timers, which we use in
		// combination with the process-wide timer. Avoid double-counting.
		if !delayedSignal && validSIGPROF(mp, c) {
			sigprof(c.sigpc(), c.sigsp(), c.siglr(), gp, mp)
		}
		return
	}

	// Test hooks: let runtime tests intercept SIGTRAP/SIGUSR1.
	if sig == _SIGTRAP && testSigtrap != nil && testSigtrap(info, (*sigctxt)(noescape(unsafe.Pointer(c))), gp) {
		return
	}

	if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
		return
	}

	if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
		// sigPerThreadSyscall is the same signal used by glibc for
		// per-thread syscalls on Linux. We use it for the same purpose
		// in non-cgo binaries. Since this signal is not _SigNotify,
		// there is nothing more to do once we run the syscall.
		runPerThreadSyscall()
		return
	}

	if sig == sigPreempt && debug.asyncpreemptoff == 0 && !delayedSignal {
		// Might be a preemption signal.
		doSigPreempt(gp, c)
		// Even if this was definitely a preemption signal, it
		// may have been coalesced with another signal, so we
		// still let it through to the application.
	}

	// Signals outside sigtable's range are treated as fatal (_SigThrow).
	flags := int32(_SigThrow)
	if sig < uint32(len(sigtable)) {
		flags = sigtable[sig].flags
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 && (gp.throwsplit || gp != mp.curg) {
		// We can't safely sigpanic because it may grow the
		// stack. Abort in the signal handler instead.
		//
		// Also don't inject a sigpanic if we are not on a
		// user G stack. Either we're in the runtime, or we're
		// running C code. Either way we cannot recover.
		flags = _SigThrow
	}
	if isAbortPC(c.sigpc()) {
		// On many architectures, the abort function just
		// causes a memory fault. Don't turn that into a panic.
		flags = _SigThrow
	}
	if !c.sigFromUser() && flags&_SigPanic != 0 {
		// The signal is going to cause a panic.
		// Arrange the stack so that it looks like the point
		// where the signal occurred made a call to the
		// function sigpanic. Then set the PC to sigpanic.

		// Have to pass arguments out of band since
		// augmenting the stack frame would break
		// the unwinding code.
		gp.sig = sig
		gp.sigcode0 = uintptr(c.sigcode())
		gp.sigcode1 = c.fault()
		gp.sigpc = c.sigpc()

		c.preparePanic(sig, gp)
		return
	}

	if c.sigFromUser() || flags&_SigNotify != 0 {
		// Try delivering to an os/signal channel first.
		if sigsend(sig) {
			return
		}
	}

	if c.sigFromUser() && signal_ignored(sig) {
		return
	}

	// Drop seccomp-generated SIGSYS while ignoreSIGSYS is in effect.
	if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
		return
	}

	if flags&_SigKill != 0 {
		dieFromSignal(sig)
	}

	// _SigThrow means that we should exit now.
	// If we get here with _SigPanic, it means that the signal
	// was sent to us by a program (c.sigFromUser() is true);
	// in that case, if we didn't handle it in sigsend, we exit now.
	if flags&(_SigThrow|_SigPanic) == 0 {
		return
	}

	mp.throwing = throwTypeRuntime
	mp.caughtsig.set(gp)

	if crashing.Load() == 0 {
		startpanic_m()
	}

	gp = fatalsignal(sig, c, gp, mp)

	level, _, docrash := gotraceback()
	if level > 0 {
		goroutineheader(gp)
		tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
		if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
			// tracebackothers on original m skipped this one; trace it now.
			goroutineheader(mp.curg)
			traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
		} else if crashing.Load() == 0 {
			tracebackothers(gp)
			print("\n")
		}
		dumpregs(c)
	}

	if docrash {
		var crashSleepMicros uint32 = 5000
		var watchdogTimeoutMicros uint32 = 2000 * crashSleepMicros

		isCrashThread := false
		if crashing.CompareAndSwap(0, 1) {
			isCrashThread = true
		} else {
			crashing.Add(1)
		}
		if crashing.Load() < mcount()-int32(extraMLength.Load()) {
			// There are other m's that need to dump their stacks.
			// Relay SIGQUIT to the next m by sending it to the current process.
			// All m's that have already received SIGQUIT have signal masks blocking
			// receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
			// The first m will wait until all ms received the SIGQUIT, then crash/exit.
			// Just in case the relaying gets botched, each m involved in
			// the relay sleeps for 5 seconds and then does the crash/exit itself.
			// The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277):
			// in expected operation, the first m will wait until the last m has received the SIGQUIT,
			// and then run crash/exit and the process is gone.
			// However, if it spends more than 10 seconds to send SIGQUIT to all ms,
			// any of ms may crash/exit the process after waiting for 10 seconds.
			print("\n-----\n\n")
			raiseproc(_SIGQUIT)
		}
		if isCrashThread {
			// Sleep for short intervals so that we can crash quickly after all ms have received SIGQUIT.
			// Reset the timer whenever we see more ms received SIGQUIT
			// to make it have enough time to crash (see issue #64752).
			timeout := watchdogTimeoutMicros
			maxCrashing := crashing.Load()
			for timeout > 0 && (crashing.Load() < mcount()-int32(extraMLength.Load())) {
				usleep(crashSleepMicros)
				timeout -= crashSleepMicros

				if c := crashing.Load(); c > maxCrashing {
					// We make progress, so reset the watchdog timeout
					maxCrashing = c
					timeout = watchdogTimeoutMicros
				}
			}
		} else {
			// Non-crash threads wait while the crashing count keeps rising.
			maxCrashing := int32(0)
			c := crashing.Load()
			for c > maxCrashing {
				maxCrashing = c
				usleep(watchdogTimeoutMicros)
				c = crashing.Load()
			}
		}
		printDebugLog()
		crash()
	}

	printDebugLog()

	exit(2)
}
   844  
// fatalsignal prints the header of a fatal-signal crash report: the
// signal name, faulting PC, m id, sigcode and, for memory faults, the
// fault address. For SIGILL/SIGFPE it also dumps up to 16 bytes of the
// instruction stream at the PC. It returns the goroutine whose stack
// should be tracebacked: mp.curg rather than gp when the signal arrived
// while running cgo code on the g0 stack.
func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
	if sig < uint32(len(sigtable)) {
		print(sigtable[sig].name, "\n")
	} else {
		print("Signal ", sig, "\n")
	}

	if isSecureMode() {
		// In secure mode, don't print registers, addresses, or
		// instruction bytes; just exit.
		exit(2)
	}

	print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode())
	if sig == _SIGSEGV || sig == _SIGBUS {
		print(" addr=", hex(c.fault()))
	}
	print("\n")
	if mp.incgo && gp == mp.g0 && mp.curg != nil {
		print("signal arrived during cgo execution\n")
		// Switch to curg so that we get a traceback of the Go code
		// leading up to the cgocall, which switched from curg to g0.
		gp = mp.curg
	}
	if sig == _SIGILL || sig == _SIGFPE {
		// It would be nice to know how long the instruction is.
		// Unfortunately, that's complicated to do in general (mostly for x86
		// and s390x, but other archs have non-standard instruction lengths also).
		// Opt to print 16 bytes, which covers most instructions.
		const maxN = 16
		n := uintptr(maxN)
		// We have to be careful, though. If we're near the end of
		// a page and the following page isn't mapped, we could
		// segfault. So make sure we don't straddle a page (even though
		// that could lead to printing an incomplete instruction).
		// We're assuming here we can read at least the page containing the PC.
		// I suppose it is possible that the page is mapped executable but not readable?
		pc := c.sigpc()
		if n > physPageSize-pc%physPageSize {
			n = physPageSize - pc%physPageSize
		}
		print("instruction bytes:")
		b := (*[maxN]byte)(unsafe.Pointer(pc))
		for i := uintptr(0); i < n; i++ {
			print(" ", hex(b[i]))
		}
		println()
	}
	print("\n")
	return gp
}
   894  
// sigpanic turns a synchronous signal into a run-time panic.
// If the signal handler sees a synchronous panic, it arranges the
// stack to look like the function where the signal occurred called
// sigpanic, sets the signal's PC value to sigpanic, and returns from
// the signal handler. The effect is that the program will act as
// though the function that got the signal simply called sigpanic
// instead.
//
// This must NOT be nosplit because the linker doesn't know where
// sigpanic calls can be injected.
//
// The signal handler must not inject a call to sigpanic if
// getg().throwsplit, since sigpanic may need to grow the stack.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:linkname sigpanic
func sigpanic() {
	gp := getg()
	if !canpanic() {
		throw("unexpected signal during runtime execution")
	}

	switch gp.sig {
	case _SIGBUS:
		// Fault addresses below 0x1000 are treated as nil pointer
		// dereferences and become ordinary runtime panics.
		if gp.sigcode0 == _BUS_ADRERR && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		// Otherwise this is a real bus error: fatal.
		print("unexpected fault address ", hex(gp.sigcode1), "\n")
		throw("fault")
	case _SIGSEGV:
		// As with SIGBUS, low fault addresses look like nil pointer
		// dereferences and are turned into panics.
		if (gp.sigcode0 == 0 || gp.sigcode0 == _SEGV_MAPERR || gp.sigcode0 == _SEGV_ACCERR) && gp.sigcode1 < 0x1000 {
			panicmem()
		}
		// Support runtime/debug.SetPanicOnFault.
		if gp.paniconfault {
			panicmemAddr(gp.sigcode1)
		}
		if inUserArenaChunk(gp.sigcode1) {
			// We could check that the arena chunk is explicitly set to fault,
			// but the fact that we faulted on accessing it is enough to prove
			// that it is.
			print("accessed data from freed user arena ", hex(gp.sigcode1), "\n")
		} else {
			print("unexpected fault address ", hex(gp.sigcode1), "\n")
		}
		throw("fault")
	case _SIGFPE:
		switch gp.sigcode0 {
		case _FPE_INTDIV:
			panicdivide()
		case _FPE_INTOVF:
			panicoverflow()
		}
		// Any other FPE code becomes a generic floating point panic.
		panicfloat()
	}

	// For any other signal, panic with the signal's name.
	if gp.sig >= uint32(len(sigtable)) {
		// can't happen: we looked up gp.sig in sigtable to decide to call sigpanic
		throw("unexpected signal value")
	}
	panic(errorString(sigtable[gp.sig].name))
}
   962  
// dieFromSignal kills the program with a signal.
// This provides the expected exit status for the shell.
// This is only called with fatal signals expected to kill the process.
//
//go:nosplit
//go:nowritebarrierrec
func dieFromSignal(sig uint32) {
	// The signal may be blocked while running the handler; unblock it
	// so the raise below can be delivered.
	unblocksig(sig)
	// Mark the signal as unhandled to ensure it is forwarded.
	atomic.Store(&handlingSig[sig], 0)
	raise(sig)

	// That should have killed us. On some systems, though, raise
	// sends the signal to the whole process rather than to just
	// the current thread, which means that the signal may not yet
	// have been delivered. Give other threads a chance to run and
	// pick up the signal.
	osyield()
	osyield()
	osyield()

	// If that didn't work, try _SIG_DFL.
	setsig(sig, _SIG_DFL)
	raise(sig)

	// Again give other threads a chance to pick up the signal.
	osyield()
	osyield()
	osyield()

	// If we are still somehow running, just exit with the wrong status.
	exit(2)
}
   995  
// raisebadsignal is called when a signal is received on a non-Go
// thread, and the Go program does not want to handle it (that is, the
// program has not called os/signal.Notify for the signal).
// It re-delivers the signal to whatever handler was installed before
// the Go runtime's, temporarily swapping handlers to do so.
func raisebadsignal(sig uint32, c *sigctxt) {
	if sig == _SIGPROF {
		// Ignore profiling signals that arrive on non-Go threads.
		return
	}

	var handler uintptr
	var flags int32
	if sig >= _NSIG {
		// Out-of-range signal number: fall back to the default handler.
		handler = _SIG_DFL
	} else {
		// fwdSig holds the handler that was installed before Go's.
		handler = atomic.Loaduintptr(&fwdSig[sig])
		flags = sigtable[sig].flags
	}

	// If the signal is ignored, raising the signal is no-op.
	if handler == _SIG_IGN || (handler == _SIG_DFL && flags&_SigIgn != 0) {
		return
	}

	// Reset the signal handler and raise the signal.
	// We are currently running inside a signal handler, so the
	// signal is blocked. We need to unblock it before raising the
	// signal, or the signal we raise will be ignored until we return
	// from the signal handler. We know that the signal was unblocked
	// before entering the handler, or else we would not have received
	// it. That means that we don't have to worry about blocking it
	// again.
	unblocksig(sig)
	setsig(sig, handler)

	// If we're linked into a non-Go program we want to try to
	// avoid modifying the original context in which the signal
	// was raised. If the handler is the default, we know it
	// is non-recoverable, so we don't have to worry about
	// re-installing sighandler. At this point we can just
	// return and the signal will be re-raised and caught by
	// the default handler with the correct context.
	//
	// On FreeBSD, the libthr sigaction code prevents
	// this from working so we fall through to raise.
	if GOOS != "freebsd" && (isarchive || islibrary) && handler == _SIG_DFL && !c.sigFromUser() {
		return
	}

	raise(sig)

	// Give the signal a chance to be delivered.
	// In almost all real cases the program is about to crash,
	// so sleeping here is not a waste of time.
	usleep(1000)

	// If the signal didn't cause the program to exit, restore the
	// Go signal handler and carry on.
	//
	// We may receive another instance of the signal before we
	// restore the Go handler, but that is not so bad: we know
	// that the Go program has been ignoring the signal.
	setsig(sig, abi.FuncPCABIInternal(sighandler))
}
  1059  
// crash kills the program with SIGABRT, which by default terminates
// the process and, where enabled, produces a core dump.
//
//go:nosplit
func crash() {
	dieFromSignal(_SIGABRT)
}
  1064  
// ensureSigM starts one global, sleeping thread to make sure at least one thread
// is available to catch signals enabled for os/signal.
func ensureSigM() {
	if maskUpdatedChan != nil {
		// Already started.
		return
	}
	maskUpdatedChan = make(chan struct{})
	disableSigChan = make(chan uint32)
	enableSigChan = make(chan uint32)
	go func() {
		// Signal masks are per-thread, so make sure this goroutine stays on one
		// thread.
		LockOSThread()
		defer UnlockOSThread()
		// The sigBlocked mask contains the signals not active for os/signal,
		// initially all signals except the essential. When signal.Notify()/Stop is called,
		// sigenable/sigdisable in turn notify this thread to update its signal
		// mask accordingly.
		sigBlocked := sigset_all
		for i := range sigtable {
			if !blockableSig(uint32(i)) {
				sigdelset(&sigBlocked, i)
			}
		}
		sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
		for {
			select {
			case sig := <-enableSigChan:
				if sig > 0 {
					// Enabled for os/signal: stop blocking it here.
					sigdelset(&sigBlocked, int(sig))
				}
			case sig := <-disableSigChan:
				if sig > 0 && blockableSig(sig) {
					// Disabled again: resume blocking it on this thread.
					sigaddset(&sigBlocked, int(sig))
				}
			}
			sigprocmask(_SIG_SETMASK, &sigBlocked, nil)
			// Report that the mask update is complete.
			maskUpdatedChan <- struct{}{}
		}
	}()
}
  1106  
// This is called when we receive a signal when there is no signal stack.
// This can only happen if non-Go code calls sigaltstack to disable the
// signal stack. It is a fatal error: throw does not return.
func noSignalStack(sig uint32) {
	println("signal", sig, "received on thread with no signal stack")
	throw("non-Go code disabled sigaltstack")
}
  1114  
// This is called if we receive a signal when there is a signal stack
// but we are not on it. This can only happen if non-Go code called
// sigaction without setting the SS_ONSTACK flag. It prints the stack
// bounds the runtime expected, then throws (fatal, does not return).
func sigNotOnStack(sig uint32, sp uintptr, mp *m) {
	println("signal", sig, "received but handler not on signal stack")
	print("mp.gsignal stack [", hex(mp.gsignal.stack.lo), " ", hex(mp.gsignal.stack.hi), "], ")
	print("mp.g0 stack [", hex(mp.g0.stack.lo), " ", hex(mp.g0.stack.hi), "], sp=", hex(sp), "\n")
	throw("non-Go code set up signal handler without SA_ONSTACK flag")
}
  1124  
// signalDuringFork is called if we receive a signal while doing a fork.
// We do not want signals at that time, as a signal sent to the process
// group may be delivered to the child process, causing confusion.
// This should never be called, because we block signals across the fork;
// this function is just a safety check. See issue 18600 for background.
func signalDuringFork(sig uint32) {
	println("signal", sig, "received during fork")
	throw("signal received during fork")
}
  1134  
// badsignal handles a signal delivered to a thread with no m or g.
// It temporarily acquires an extra m (needm) so the signal can be
// queued to os/signal, or re-raised for a non-Go handler otherwise.
//
// This runs on a foreign stack, without an m or a g. No stack split.
//
//go:nosplit
//go:norace
//go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt) {
	if !iscgo && !cgoHasExtraM {
		// There is no extra M. needm will not be able to grab
		// an M. Instead of hanging, just crash.
		// Cannot call split-stack function as there is no G.
		writeErrStr("fatal: bad g in signal handler\n")
		exit(2)
		// Should be unreachable: if exit somehow returns, force a
		// fault by writing through an invalid low address.
		*(*uintptr)(unsafe.Pointer(uintptr(123))) = 2
	}
	needm(true)
	if !sigsend(uint32(sig)) {
		// A foreign thread received the signal sig, and the
		// Go code does not want to handle it.
		raisebadsignal(uint32(sig), c)
	}
	dropm()
}
  1157  
// sigfwd invokes the handler fn for signal sig with the given siginfo
// and context. It has no Go body; it is implemented outside Go (in
// assembly), since calling an arbitrary C handler requires switching
// ABIs.
//
//go:noescape
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
  1160  
// Determines if the signal should be handled by Go and if not, forwards the
// signal to the handler that was installed before Go's. Returns whether the
// signal was forwarded.
// This is called by the signal handler, and the world may be stopped.
//
//go:nosplit
//go:nowritebarrierrec
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
	if sig >= uint32(len(sigtable)) {
		return false
	}
	// fwdFn is the handler installed before Go's, if any.
	fwdFn := atomic.Loaduintptr(&fwdSig[sig])
	flags := sigtable[sig].flags

	// If we aren't handling the signal, forward it.
	if atomic.Load(&handlingSig[sig]) == 0 || !signalsOK {
		// If the signal is ignored, doing nothing is the same as forwarding.
		if fwdFn == _SIG_IGN || (fwdFn == _SIG_DFL && flags&_SigIgn != 0) {
			return true
		}
		// We are not handling the signal and there is no other handler to forward to.
		// Crash with the default behavior.
		if fwdFn == _SIG_DFL {
			setsig(sig, _SIG_DFL)
			dieFromSignal(sig)
			return false
		}

		sigfwd(fwdFn, sig, info, ctx)
		return true
	}

	// This function and its caller sigtrampgo assumes SIGPIPE is delivered on the
	// originating thread. This property does not hold on macOS (golang.org/issue/33384),
	// so we have no choice but to ignore SIGPIPE.
	if (GOOS == "darwin" || GOOS == "ios") && sig == _SIGPIPE {
		return true
	}

	// If there is no handler to forward to, no need to forward.
	if fwdFn == _SIG_DFL {
		return false
	}

	c := &sigctxt{info, ctx}
	// Only forward synchronous signals and SIGPIPE.
	// Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
	// is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
	// or pipe.
	if (c.sigFromUser() || flags&_SigPanic == 0) && sig != _SIGPIPE {
		return false
	}
	// Determine if the signal occurred inside Go code. We test that:
	//   (1) we weren't in VDSO page,
	//   (2) we were in a goroutine (i.e., m.curg != nil), and
	//   (3) we weren't in CGO.
	//   (4) we weren't in dropped extra m.
	gp := sigFetchG(c)
	if gp != nil && gp.m != nil && gp.m.curg != nil && !gp.m.isExtraInC && !gp.m.incgo {
		// The signal happened in Go code; let Go handle it.
		return false
	}

	// Signal not handled by Go, forward it.
	// If the pre-Go handler is SIG_IGN, ignoring is equivalent to forwarding.
	if fwdFn != _SIG_IGN {
		sigfwd(fwdFn, sig, info, ctx)
	}

	return true
}
  1230  
// sigsave saves the current thread's signal mask into *p.
// This is used to preserve the non-Go signal mask when a non-Go
// thread calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func sigsave(p *sigset) {
	// A nil new mask means sigprocmask only queries: the current
	// mask is written to *p and nothing is changed.
	sigprocmask(_SIG_SETMASK, nil, p)
}
  1242  
// msigrestore sets the current thread's signal mask to sigmask.
// This is used to restore the non-Go signal mask when a non-Go thread
// calls a Go function.
// This is nosplit and nowritebarrierrec because it is called by dropm
// after g has been cleared.
//
//go:nosplit
//go:nowritebarrierrec
func msigrestore(sigmask sigset) {
	// Replace the whole mask with the previously saved one.
	sigprocmask(_SIG_SETMASK, &sigmask, nil)
}
  1254  
// sigsetAllExiting is used by sigblock(true) when a thread is
// exiting.
//
// It is computed once, at package initialization time, by the
// immediately invoked function literal below.
var sigsetAllExiting = func() sigset {
	res := sigset_all

	// Apply GOOS-specific overrides here, rather than in osinit,
	// because osinit may be called before sigsetAllExiting is
	// initialized (#51913).
	if GOOS == "linux" && iscgo {
		// #42494 glibc and musl reserve some signals for
		// internal use and require they not be blocked by
		// the rest of a normal C runtime. When the go runtime
		// blocks...unblocks signals, temporarily, the blocked
		// interval of time is generally very short. As such,
		// these expectations of *libc code are mostly met by
		// the combined go+cgo system of threads. However,
		// when go causes a thread to exit, via a return from
		// mstart(), the combined runtime can deadlock if
		// these signals are blocked. Thus, don't block these
		// signals when exiting threads.
		// - glibc: SIGCANCEL (32), SIGSETXID (33)
		// - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
		sigdelset(&res, 32)
		sigdelset(&res, 33)
		sigdelset(&res, 34)
	}

	return res
}()
  1284  
  1285  // sigblock blocks signals in the current thread's signal mask.
  1286  // This is used to block signals while setting up and tearing down g
  1287  // when a non-Go thread calls a Go function. When a thread is exiting
  1288  // we use the sigsetAllExiting value, otherwise the OS specific
  1289  // definition of sigset_all is used.
  1290  // This is nosplit and nowritebarrierrec because it is called by needm
  1291  // which may be called on a non-Go thread with no g available.
  1292  //
  1293  //go:nosplit
  1294  //go:nowritebarrierrec
  1295  func sigblock(exiting bool) {
  1296  	if exiting {
  1297  		sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
  1298  		return
  1299  	}
  1300  	sigprocmask(_SIG_SETMASK, &sigset_all, nil)
  1301  }
  1302  
// unblocksig removes sig from the current thread's signal mask.
// This is nosplit and nowritebarrierrec because it is called from
// dieFromSignal, which can be called by sigfwdgo while running in the
// signal handler, on the signal stack, with no g available.
//
//go:nosplit
//go:nowritebarrierrec
func unblocksig(sig uint32) {
	// Build a one-signal set and unblock only that signal,
	// leaving the rest of the mask unchanged.
	var set sigset
	sigaddset(&set, int(sig))
	sigprocmask(_SIG_UNBLOCK, &set, nil)
}
  1315  
// minitSignals is called when initializing a new m to set the
// thread's alternate signal stack and signal mask.
func minitSignals() {
	// Stack first, then mask: signals can be received once the
	// mask is installed (see minitSignalMask).
	minitSignalStack()
	minitSignalMask()
}
  1322  
// minitSignalStack is called when initializing a new m to set the
// alternate signal stack. If the alternate signal stack is not set
// for the thread (the normal case) then set the alternate signal
// stack to the gsignal stack. If the alternate signal stack is set
// for the thread (the case when a non-Go thread sets the alternate
// signal stack and then calls a Go function) then set the gsignal
// stack to the alternate signal stack. We also set the alternate
// signal stack to the gsignal stack if cgo is not used (regardless
// of whether it is already set). Record which choice was made in
// newSigstack, so that it can be undone in unminit.
func minitSignalStack() {
	mp := getg().m
	// Query (not modify) the thread's current alternate stack.
	var st stackt
	sigaltstack(nil, &st)
	if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
		// No alternate stack set (or not a cgo binary):
		// install the m's gsignal stack as the alternate stack.
		signalstack(&mp.gsignal.stack)
		mp.newSigstack = true
	} else {
		// Adopt the pre-existing alternate stack as the gsignal
		// stack, saving the old values for unminitSignals.
		setGsignalStack(&st, &mp.goSigStack)
		mp.newSigstack = false
	}
}
  1345  
// minitSignalMask is called when initializing a new m to set the
// thread's signal mask. When this is called all signals have been
// blocked for the thread.  This starts with m.sigmask, which was set
// either from initSigmask for a newly created thread or by calling
// sigsave if this is a non-Go thread calling a Go function. It
// removes all essential signals from the mask, thus causing those
// signals to not be blocked. Then it sets the thread's signal mask.
// After this is called the thread can receive signals.
func minitSignalMask() {
	nmask := getg().m.sigmask
	for i := range sigtable {
		if !blockableSig(uint32(i)) {
			// Essential signal: make sure it is never blocked.
			sigdelset(&nmask, i)
		}
	}
	sigprocmask(_SIG_SETMASK, &nmask, nil)
}
  1363  
// unminitSignals is called from dropm, via unminit, to undo the
// effect of calling minit on a non-Go thread.
//
//go:nosplit
func unminitSignals() {
	if getg().m.newSigstack {
		// We installed the gsignal stack as the alternate stack
		// in minitSignalStack; disable it again.
		st := stackt{ss_flags: _SS_DISABLE}
		sigaltstack(&st, nil)
	} else {
		// We got the signal stack from someone else. Restore
		// the Go-allocated stack in case this M gets reused
		// for another thread (e.g., it's an extram). Also, on
		// Android, libc allocates a signal stack for all
		// threads, so it's important to restore the Go stack
		// even on Go-created threads so we can free it.
		restoreGsignalStack(&getg().m.goSigStack)
	}
}
  1382  
  1383  // blockableSig reports whether sig may be blocked by the signal mask.
  1384  // We never want to block the signals marked _SigUnblock;
  1385  // these are the synchronous signals that turn into a Go panic.
  1386  // We never want to block the preemption signal if it is being used.
  1387  // In a Go program--not a c-archive/c-shared--we never want to block
  1388  // the signals marked _SigKill or _SigThrow, as otherwise it's possible
  1389  // for all running threads to block them and delay their delivery until
  1390  // we start a new thread. When linked into a C program we let the C code
  1391  // decide on the disposition of those signals.
  1392  func blockableSig(sig uint32) bool {
  1393  	flags := sigtable[sig].flags
  1394  	if flags&_SigUnblock != 0 {
  1395  		return false
  1396  	}
  1397  	if sig == sigPreempt && preemptMSupported && debug.asyncpreemptoff == 0 {
  1398  		return false
  1399  	}
  1400  	if isarchive || islibrary {
  1401  		return true
  1402  	}
  1403  	return flags&(_SigKill|_SigThrow) == 0
  1404  }
  1405  
// gsignalStack saves the fields of the gsignal stack changed by
// setGsignalStack, so restoreGsignalStack can put them back.
type gsignalStack struct {
	stack       stack   // saved gsignal.stack bounds
	stackguard0 uintptr // saved gsignal.stackguard0
	stackguard1 uintptr // saved gsignal.stackguard1
	stktopsp    uintptr // saved gsignal.stktopsp
}
  1414  
// setGsignalStack sets the gsignal stack of the current m to an
// alternate signal stack returned from the sigaltstack system call.
// It saves the old values in *old for use by restoreGsignalStack.
// This is used when handling a signal if non-Go code has set the
// alternate signal stack.
//
//go:nosplit
//go:nowritebarrierrec
func setGsignalStack(st *stackt, old *gsignalStack) {
	gp := getg()
	if old != nil {
		// Save the current gsignal stack fields so they can be
		// restored later by restoreGsignalStack.
		old.stack = gp.m.gsignal.stack
		old.stackguard0 = gp.m.gsignal.stackguard0
		old.stackguard1 = gp.m.gsignal.stackguard1
		old.stktopsp = gp.m.gsignal.stktopsp
	}
	// Point gsignal at the externally provided stack [sp, sp+size),
	// with stack guards placed stackGuard bytes above its base.
	stsp := uintptr(unsafe.Pointer(st.ss_sp))
	gp.m.gsignal.stack.lo = stsp
	gp.m.gsignal.stack.hi = stsp + st.ss_size
	gp.m.gsignal.stackguard0 = stsp + stackGuard
	gp.m.gsignal.stackguard1 = stsp + stackGuard
}
  1437  
// restoreGsignalStack restores the gsignal stack to the value it had
// before entering the signal handler.
//
//go:nosplit
//go:nowritebarrierrec
func restoreGsignalStack(st *gsignalStack) {
	// Note: gp here is the m's gsignal goroutine, not the current g.
	gp := getg().m.gsignal
	gp.stack = st.stack
	gp.stackguard0 = st.stackguard0
	gp.stackguard1 = st.stackguard1
	gp.stktopsp = st.stktopsp
}
  1450  
// signalstack sets the current thread's alternate signal stack to s.
//
//go:nosplit
func signalstack(s *stack) {
	// Describe the region [s.lo, s.hi) and install it via sigaltstack.
	st := stackt{ss_size: s.hi - s.lo}
	setSignalstackSP(&st, s.lo)
	sigaltstack(&st, nil)
}
  1459  
// setsigsegv is used on darwin/arm64 to fake a segmentation fault.
// It fills in the current g's signal fields as if a SIGSEGV with
// _SEGV_MAPERR had been received at pc, so sigpanic can run.
//
// This is exported via linkname to assembly in runtime/cgo.
//
//go:nosplit
//go:linkname setsigsegv
func setsigsegv(pc uintptr) {
	gp := getg()
	gp.sig = _SIGSEGV
	gp.sigpc = pc
	gp.sigcode0 = _SEGV_MAPERR
	gp.sigcode1 = 0 // TODO: emulate si_addr
}
  1473  
