Source file src/runtime/stack_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/testenv"
	"reflect"
	"regexp"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe" // for go:linkname
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
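	// The estimate below is 8x the ~4MB lower bound on stack use
	// (BatchSize goroutines * RecursionDepth frames * ArraySize bytes
	// of array argument per frame).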
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want at most %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want at most %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures.
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()
	t.Log("first growStack took", growDuration)

	// in a locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in a finalizer
	var finalizerStart time.Time
	var started atomic.Bool
	var progress atomic.Uint32
	wg.Add(1)
	s := new(string) // Must be of a type that avoids the tiny allocator, or else the finalizer might not run.
	SetFinalizer(s, func(ss *string) {
		defer wg.Done()
		finalizerStart = time.Now()
		started.Store(true)
		growStack(&progress)
	})
	setFinalizerTime := time.Now()
	s = nil

	if d, ok := t.Deadline(); ok {
		// Shorten the timeout by an arbitrary 5% so that the AfterFunc has
		// time to run before the test deadline.
		timeout := time.Until(d) * 19 / 20
		timer := time.AfterFunc(timeout, func() {
			// Panic (instead of calling t.Error and returning from the test) so
			// that we get a useful goroutine dump if the test times out, especially
			// if GOTRACEBACK=system or GOTRACEBACK=crash is set.
			if !started.Load() {
				panic("finalizer did not start")
			} else {
				panic(fmt.Sprintf("finalizer started %s ago (%s after registration) and ran %d iterations, but did not return", time.Since(finalizerStart), finalizerStart.Sub(setFinalizerTime), progress.Load()))
			}
		})
		defer timer.Stop()
	}

	GC()
	wg.Wait()
	t.Logf("finalizer started after %s and ran %d iterations in %v", finalizerStart.Sub(setFinalizerTime), progress.Load(), time.Since(finalizerStart))
}

// ... and in init
//func init() {
//	growStack(nil)
//}

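// growStack repeatedly calls growStackIter with increasing depth, checking
// after each call that the pointer passed down the stack was adjusted
// correctly when the stack grew. If progress is non-nil, it is updated on
// every iteration so a watchdog can report how far the test got.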
func growStack(progress *atomic.Uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			progress.Store(uint32(i))
		}
	}
	GC()
}

// growStackIter is a named function rather than a closure so that the
// compiler can do escape analysis and keep x on the stack; stack growth
// must then update the pointer p that refers to it.
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

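// growStackWithCallback invokes cb at 1<<10 different stack depths, so that
// some of the calls to cb are likely to land right at a stack-growth boundary.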
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

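// setBig is like set, but its bigBuf argument gives the deferred call more
// than 4kB of arguments, so starting it can itself force a stack growth.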
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//  deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

// TestPanicUseStack checks that a chain of Panic structs on the stack are
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we have created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// useStackAndCall uses about n KB of stack and then calls f.
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
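	// b[99] is always zero, but folding it into the recursion count below
	// keeps the compiler from proving b unused and eliding the ~1KB frame.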
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

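// growing services stack-use requests: for each n received on c it uses
// about n KB of stack, signaling on done after each request and once more
// when c is closed.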
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4   // repetitions
		G = 200 // goroutines
		S = 5   // stack-growth steps per repetition
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }
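
// Note on BenchmarkStackCopyNoCache: because the recursion above cycles
// through 24 distinct functions, adjacent stack frames belong to different
// functions, so stack copying presumably cannot keep hitting cached per-PC
// unwinding metadata (such as the runtime's pcvalue cache) the way the
// single-function recursion in BenchmarkStackCopy can.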

type stkobjT struct {
	p *stkobjT
	x int64
	y [20]int // consume some stack
}

// Sum creates a linked list of stkobjTs on the stack, then accumulates
// n + (n-1) + ... + 1 into p.x as the recursion unwinds.
func Sum(n int64, p *stkobjT) {
	if n == 0 {
		return
	}
	s := stkobjT{p: p, x: n}
	Sum(n-1, &s)
	p.x += s.x
}

func BenchmarkStackCopyWithStkobj(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			var s stkobjT
			Sum(100000, &s)
			c <- true
		}()
		<-c
	}
}

func BenchmarkIssue18138(b *testing.B) {
	// Channel with N "can run a goroutine" tokens
	const N = 10
	c := make(chan []byte, N)
	for i := 0; i < N; i++ {
		c <- make([]byte, 1)
	}

	for i := 0; i < b.N; i++ {
		<-c // get token
		go func() {
			useStackPtrs(1000, false) // uses ~1MB max
			m := make([]byte, 8192)   // make GC trigger occasionally
			c <- m                    // return token
		}()
	}
}

func useStackPtrs(n int, b bool) {
	if b {
		// This code contributes to the stack frame size, and hence to the
		// stack copying cost. But since b is always false, it costs no
		// execution time (not even the zeroing of a).
		var a [128]*int // 1KB of pointers
		a[n] = &n
		n = *a[0]
	}
	if n == 0 {
		return
	}
	useStackPtrs(n-1, b)
}

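// structWithMethod exists so the compiler generates pointer-receiver
// wrappers like (*structWithMethod).caller for its value methods; the
// tests below check that these autogenerated wrappers stay out of stack
// traces except when they themselves panic.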
type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

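// nop has a value receiver, so calling the autogenerated pointer wrapper
// (*structWithMethod).nop with a nil receiver goes through panicwrap
// (see TestStackWrapperStackPanic).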
func (s structWithMethod) nop() {}

func (s structWithMethod) inlinablePanic() { panic("panic") }

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

func TestStackWrapperStackInlinePanic(t *testing.T) {
	// Test that inline unwinding correctly tracks the callee by creating a
	// stack of the form wrapper -> inlined function -> panic. If we mess up
	// callee tracking, it will look like the wrapper called panic and we'll see
	// the wrapper in the stack trace.
	var d structWithMethod
	wrapper := (*structWithMethod).inlinablePanic
	defer func() {
		err := recover()
		if err == nil {
			t.Fatalf("expected panic")
		}
		buf := make([]byte, 4<<10)
		stk := string(buf[:Stack(buf, false)])
		if strings.Contains(stk, "<autogenerated>") {
			t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
		}
		// Self-check: make sure inlinablePanic got inlined.
		if !testenv.OptimizationOff() {
			if !strings.Contains(stk, "inlinablePanic(...)") {
				t.Fatalf("inlinablePanic not inlined")
			}
		}
	}()
	wrapper(&d)
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// Nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb strings.Builder
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that defer closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // processors
	N := 10000 // iterations
	D := 200   // stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte)) // force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
var escapeMe = func(x any) {}
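
// (escapeMe is a mutable function variable, so the compiler cannot see its
// empty body at the call site and must assume the argument escapes; the
// resulting heap allocations are what force the GCs above.)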

func TestFramePointerAdjust(t *testing.T) {
	switch GOARCH {
	case "amd64", "arm64":
	default:
		t.Skipf("frame pointer is not supported on %s", GOARCH)
	}
	output := runTestProg(t, "testprog", "FramePointerAdjust")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

// TestSystemstackFramePointerAdjust is a regression test for issue 59692 that
// ensures that the frame pointer of systemstack is correctly adjusted. See CL
// 489015 for more details.
func TestSystemstackFramePointerAdjust(t *testing.T) {
	growAndShrinkStack(512, [1024]byte{})
}

// growAndShrinkStack grows the stack of the current goroutine in order to
// shrink it again and verify that all frame pointers on the new stack have
// been correctly adjusted. stackBallast is used to ensure we're not depending
// on the current heuristics of stack shrinking too much.
func growAndShrinkStack(n int, stackBallast [1024]byte) {
	if n <= 0 {
		return
	}
	growAndShrinkStack(n-1, stackBallast)
	ShrinkStackAndVerifyFramePointers()
}
