Source file src/runtime/profbuf_test.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"regexp"
	"runtime"
	. "runtime"
	"slices"
	"sync"
	"testing"
	"time"
	"unsafe"
)

func TestProfBuf(t *testing.T) {
	const hdrSize = 2

	write := func(t *testing.T, b *ProfBuf, tag unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
		b.Write(&tag, now, hdr, stk)
	}
	read := func(t *testing.T, b *ProfBuf, data []uint64, tags []unsafe.Pointer) {
		rdata, rtags, eof := b.Read(ProfBufNonBlocking)
		if !slices.Equal(rdata, data) || !slices.Equal(rtags, tags) {
			t.Fatalf("unexpected profile read:\nhave data %#x\nwant data %#x\nhave tags %#x\nwant tags %#x", rdata, data, rtags, tags)
		}
		if eof {
			t.Fatalf("unexpected eof")
		}
	}
	readBlock := func(t *testing.T, b *ProfBuf, data []uint64, tags []unsafe.Pointer) func() {
		c := make(chan int)
		go func() {
			eof := data == nil
			rdata, rtags, reof := b.Read(ProfBufBlocking)
			if !slices.Equal(rdata, data) || !slices.Equal(rtags, tags) || reof != eof {
				// Errorf, not Fatalf, because called in goroutine.
				t.Errorf("unexpected profile read:\nhave data %#x\nwant data %#x\nhave tags %#x\nwant tags %#x\nhave eof=%v, want %v", rdata, data, rtags, tags, reof, eof)
			}
			c <- 1
		}()
		time.Sleep(10 * time.Millisecond) // let goroutine run and block
		return func() { <-c }
	}
	readEOF := func(t *testing.T, b *ProfBuf) {
		rdata, rtags, eof := b.Read(ProfBufBlocking)
		if rdata != nil || rtags != nil || !eof {
			t.Errorf("unexpected profile read: %#x, %#x, eof=%v; want nil, nil, eof=true", rdata, rtags, eof)
		}
		rdata, rtags, eof = b.Read(ProfBufNonBlocking)
		if rdata != nil || rtags != nil || !eof {
			t.Errorf("unexpected profile read (non-blocking): %#x, %#x, eof=%v; want nil, nil, eof=true", rdata, rtags, eof)
		}
	}

	myTags := make([]byte, 100)
	t.Logf("myTags is %p", &myTags[0])

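	// Record encoding, inferred from the expected values below: each record
	// in the data stream is [total length, timestamp, hdr..., stk...], where
	// hdr is hdrSize (2) words, so a write with a 6-word stack reads back as
	// 10 data words. Tags are returned in a parallel slice, one per record.
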
	t.Run("BasicWriteRead", func(t *testing.T) {
		b := NewProfBuf(2, 11, 1)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
		read(t, b, nil, nil) // release data returned by previous read
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		read(t, b, []uint64{8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[2])})
	})

	t.Run("ReadMany", func(t *testing.T) {
		b := NewProfBuf(2, 50, 50)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506})
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204, 5, 500, 502, 504, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2]), unsafe.Pointer(&myTags[1])})
	})

	t.Run("ReadManyShortData", func(t *testing.T) {
		b := NewProfBuf(2, 50, 50)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2])})
	})

	t.Run("ReadManyShortTags", func(t *testing.T) {
		b := NewProfBuf(2, 50, 50)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2])})
	})

	t.Run("ReadAfterOverflow1", func(t *testing.T) {
		// overflow record synthesized by write
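		// When writes are dropped, the buffer keeps a single pending
		// overflow record, read back as [5, time of first dropped write,
		// 0, 0, count] with a nil tag (see the final read below).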
		b := NewProfBuf(2, 16, 5)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})           // uses 10
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])}) // reads 10 but still in use until next read
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5})                       // uses 6
		read(t, b, []uint64{6, 1, 2, 3, 4, 5}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})              // reads 6 but still in use until next read
		// now 10 available
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204, 205, 206, 207, 208, 209}) // no room
		for i := 0; i < 299; i++ {
			write(t, b, unsafe.Pointer(&myTags[3]), int64(100+i), []uint64{101, 102}, []uintptr{201, 202, 203, 204}) // no room for overflow+this record
		}
		write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506}) // room for overflow+this record
		read(t, b, []uint64{5, 99, 0, 0, 300, 5, 500, 502, 504, 506}, []unsafe.Pointer{nil, unsafe.Pointer(&myTags[1])})
	})

	t.Run("ReadAfterOverflow2", func(t *testing.T) {
		// overflow record synthesized by read
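		// Here the reader drains the buffer while writes are still being
		// dropped, so Read itself, rather than a later Write, emits the
		// pending overflow record: count 301 = 300 writes dropped before
		// the first read plus the "still overflow" write after it.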
		b := NewProfBuf(2, 16, 5)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213})
		for i := 0; i < 299; i++ {
			write(t, b, unsafe.Pointer(&myTags[3]), 100, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		}
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])}) // reads 10 but still in use until next read
		write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{})                     // still overflow
		read(t, b, []uint64{5, 99, 0, 0, 301}, []unsafe.Pointer{nil})                                     // overflow synthesized by read
		write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 505}, []uintptr{506})                  // written
		read(t, b, []uint64{5, 500, 502, 505, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[1])})
	})

	t.Run("ReadAtEndAfterOverflow", func(t *testing.T) {
		b := NewProfBuf(2, 12, 5)
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		for i := 0; i < 299; i++ {
			write(t, b, unsafe.Pointer(&myTags[3]), 100, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		}
		read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
		read(t, b, []uint64{5, 99, 0, 0, 300}, []unsafe.Pointer{nil})
		write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506})
		read(t, b, []uint64{5, 500, 502, 504, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[1])})
	})

	t.Run("BlockingWriteRead", func(t *testing.T) {
		b := NewProfBuf(2, 11, 1)
		wait := readBlock(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
		write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
		wait()
		wait = readBlock(t, b, []uint64{8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[2])})
		time.Sleep(10 * time.Millisecond)
		write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
		wait()
		wait = readBlock(t, b, nil, nil)
		b.Close()
		wait()
		wait = readBlock(t, b, nil, nil)
		wait()
		readEOF(t, b)
	})

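	// The next three subtests cycle ten records through buffers sized so
	// that the data words, the tag slots, or both wrap around the end of
	// the ring, exercising the wraparound of the read/write positions.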
	t.Run("DataWraparound", func(t *testing.T) {
		b := NewProfBuf(2, 16, 1024)
		for i := 0; i < 10; i++ {
			write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
			read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
			read(t, b, nil, nil) // release data returned by previous read
		}
	})

	t.Run("TagWraparound", func(t *testing.T) {
		b := NewProfBuf(2, 1024, 2)
		for i := 0; i < 10; i++ {
			write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
			read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
			read(t, b, nil, nil) // release data returned by previous read
		}
	})

	t.Run("BothWraparound", func(t *testing.T) {
		b := NewProfBuf(2, 16, 2)
		for i := 0; i < 10; i++ {
			write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
			read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
			read(t, b, nil, nil) // release data returned by previous read
		}
	})
}

func TestProfBufDoubleWakeup(t *testing.T) {
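	// Hammer a small buffer from a writer goroutine while this goroutine
	// repeatedly blocks in Read; the reader should observe EOF after Close
	// rather than miss a wakeup and block forever.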
	b := NewProfBuf(2, 16, 2)
	go func() {
		for range 1000 {
			b.Write(nil, 1, []uint64{5, 6}, []uintptr{7, 8})
		}
		b.Close()
	}()
	for {
		_, _, eof := b.Read(ProfBufBlocking)
		if eof {
			return
		}
	}
}

func TestProfBufWakeup(t *testing.T) {
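	// A blocked reader should not be woken until the data buffer is at
	// least half full; with a 16-word buffer, the read loop below checks
	// that the first wakeup delivers at least 8 words.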
	b := NewProfBuf(2, 16, 2)
	var wg sync.WaitGroup
	wg.Go(func() {
		read := 0
		for {
			rdata, _, eof := b.Read(ProfBufBlocking)
			if read == 0 && len(rdata) < 8 {
				t.Errorf("first wake up at less than half full, got %x", rdata)
			}
			read += len(rdata)
			if eof {
				return
			}
		}
	})

	// Under the hood profBuf uses notetsleepg when the reader blocks.
	// Different platforms have different implementations, leading to
	// different statuses we need to look for to determine whether the
	// reader is blocked.
	var waitStatus string
	switch runtime.GOOS {
	case "js":
		waitStatus = "waiting"
	case "wasip1":
		waitStatus = "runnable"
	default:
		waitStatus = "syscall"
	}

	// Ensure that the reader is blocked.
	awaitBlockedGoroutine(waitStatus, "TestProfBufWakeup.func1")
	// NB: this record occupies 6 data words, not 4: the 2-word hdr and
	// 2-word stack cost an extra 2-word record header (length and time).
	// That is still under half of the 16-word buffer, so the reader
	// shouldn't wake up for it.
	b.Write(nil, 1, []uint64{1, 2}, []uintptr{3, 4})

	// The reader should still be blocked. The awaitBlockedGoroutine here
	// checks that and also gives a buggy implementation a chance to
	// actually wake up (it calls Gosched) before the next write. There is a
	// small chance that a buggy implementation would have woken up but
	// doesn't get scheduled by the time we do the next write. In that case
	// the reader will see a more-than-half-full buffer and the test will
	// pass. But if the implementation is broken, this test should fail
	// regularly, even if not 100% of the time.
	awaitBlockedGoroutine(waitStatus, "TestProfBufWakeup.func1")
	b.Write(nil, 1, []uint64{5, 6}, []uintptr{7, 8})
	b.Close()

	// Wait here so we can be sure the reader got the data.
	wg.Wait()
}

// awaitBlockedGoroutine spins until a goroutine executing fName appears in
// a full goroutine dump with the given wait state. See also the similar
// helpers in the runtime/pprof tests.
func awaitBlockedGoroutine(state, fName string) {
	// Note: the pattern matches both "[state]" and "[state, n minutes]".
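	// For reference, a matching entry in the dump produced by
	// Stack(buf, true) looks like:
	//
	//	goroutine 10 [syscall]:
	//	...
	//	runtime_test.TestProfBufWakeup.func1()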
	re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s.*\]:\n(?:.+\n\t.+\n)*runtime_test\.%s`, regexp.QuoteMeta(state), fName)
	r := regexp.MustCompile(re)

	buf := make([]byte, 64<<10)
	for {
		Gosched()
		n := Stack(buf, true)
		if n == len(buf) {
			// Buffer wasn't large enough for a full goroutine dump.
			// Resize it and try again.
			buf = make([]byte, 2*len(buf))
			continue
		}
		if r.Match(buf[:n]) {
			return
		}
	}
}