// Source: src/runtime/malloc_test.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"flag"
     9  	"fmt"
    10  	"internal/asan"
    11  	"internal/goarch"
    12  	"internal/race"
    13  	"internal/testenv"
    14  	"os"
    15  	"os/exec"
    16  	"reflect"
    17  	"runtime"
    18  	. "runtime"
    19  	"strings"
    20  	"sync"
    21  	"sync/atomic"
    22  	"testing"
    23  	"time"
    24  	"unsafe"
    25  )
    26  
    27  var testMemStatsCount int
    28  
// TestMemStats checks that ReadMemStats returns internally consistent and
// plausible values. The numeric thresholds below are loose sanity bounds
// (catching overflow-style garbage), not exact expectations.
func TestMemStats(t *testing.T) {
	// Counts invocations across -test.count repetitions; some checks are
	// only meaningful on the first run (see le below).
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error if x is the zero value of its dynamic type.
	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le returns a check that x, converted to float64, is below thresh.
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq returns a check that y is exactly the expected value x.
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk every MemStats field via reflection so that a newly added field
	// without a corresponding entry in fields fails the test.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	// Sys must be the exact sum of its component accounts.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer; the most recent entry lives at
	// (NumGC+255)%len, and it must agree with LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// Entries beyond NumGC have never been written and must be zero.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The ring buffer has wrapped; the total can only exceed the
		// sum of the retained entries.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
   141  
   142  func TestStringConcatenationAllocs(t *testing.T) {
   143  	n := testing.AllocsPerRun(1e3, func() {
   144  		b := make([]byte, 10)
   145  		for i := 0; i < 10; i++ {
   146  			b[i] = byte(i) + '0'
   147  		}
   148  		s := "foo" + string(b)
   149  		if want := "foo0123456789"; s != want {
   150  			t.Fatalf("want %v, got %v", want, s)
   151  		}
   152  	})
   153  	// Only string concatenation allocates.
   154  	if n != 1 {
   155  		t.Fatalf("want 1 allocation, got %v", n)
   156  	}
   157  }
   158  
   159  func TestTinyAlloc(t *testing.T) {
   160  	if runtime.Raceenabled {
   161  		t.Skip("tinyalloc suppressed when running in race mode")
   162  	}
   163  	if asan.Enabled {
   164  		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
   165  	}
   166  	const N = 16
   167  	var v [N]unsafe.Pointer
   168  	for i := range v {
   169  		v[i] = unsafe.Pointer(new(byte))
   170  	}
   171  
   172  	chunks := make(map[uintptr]bool, N)
   173  	for _, p := range v {
   174  		chunks[uintptr(p)&^7] = true
   175  	}
   176  
   177  	if len(chunks) == N {
   178  		t.Fatal("no bytes allocated within the same 8-byte chunk")
   179  	}
   180  }
   181  
// obj12 holds 12 bytes of data (8+4; total size is platform-dependent
// due to padding). TestTinyAllocIssue37262 uses it to probe tiny-allocator
// alignment: field a is accessed atomically, which requires 8-byte
// alignment even on 32-bit architectures.
type obj12 struct {
	a uint64 // accessed with atomic.StoreUint64; must be 8-byte aligned
	b uint32
}
   186  
// TestTinyAllocIssue37262 tries to provoke an alignment fault by atomically
// accessing the first 64-bit field of an object placed by the tiny
// allocator. The sequence of allocations below is deliberate; reordering
// them would defeat the test.
func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

	// Disable preemption so we stay on one P's tiny allocator and
	// nothing else allocates from it.
	runtime.Acquirem()

	// Make 1-byte allocations until we get a fresh tiny slot.
	// A pointer ending in 0xf is the last byte of a 16-byte slot, which
	// means the next tiny allocation starts a fresh slot.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	runtime.Escape(new(uint32))

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets place there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12 := runtime.Escape(new(obj12))

	// Try to atomically access "x.a".
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}
   238  
// TestFreegc does basic testing of explicit frees.
// Each table entry instantiates testFreegc for a distinct allocation size;
// the returned closure is run as a subtest, once with a runtime.GC between
// entries and (time permitting) once without.
func TestFreegc(t *testing.T) {
	tests := []struct {
		size   string                            // human-readable size label used in subtest names
		f      func(noscan bool) func(*testing.T) // testFreegc instantiated at a concrete type
		noscan bool                              // true if the type contains no pointers
	}{
		// Types without pointers.
		{"size=16", testFreegc[[16]byte], true}, // smallest we support currently
		{"size=17", testFreegc[[17]byte], true},
		{"size=64", testFreegc[[64]byte], true},
		{"size=500", testFreegc[[500]byte], true},
		{"size=512", testFreegc[[512]byte], true},
		{"size=4096", testFreegc[[4096]byte], true},
		{"size=20000", testFreegc[[20000]byte], true},       // not power of 2 or spc boundary
		{"size=32KiB-8", testFreegc[[1<<15 - 8]byte], true}, // max noscan small object for 64-bit
	}

	// Run the tests twice if not in -short mode or not otherwise saving test time.
	// First while manually calling runtime.GC to slightly increase isolation (perhaps making
	// problems more reproducible).
	for _, tt := range tests {
		runtime.GC()
		t.Run(fmt.Sprintf("gc=yes/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()

	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		return
	}

	// Again, but without manually calling runtime.GC in the loop (perhaps less isolation might
	// trigger problems).
	for _, tt := range tests {
		t.Run(fmt.Sprintf("gc=no/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()
}
   277  
// testFreegc returns a test function exercising runtime.Freegc with
// allocations of type T. noscan must report whether T is pointer-free;
// it is passed straight through to runtime.Freegc. T is comparable so
// the helpers can verify memory is zeroed.
func testFreegc[T comparable](noscan bool) func(*testing.T) {
	// We use stressMultiple to influence the duration of the tests.
	// When testing freegc changes, stressMultiple can be increased locally
	// to test longer or in some cases with more goroutines.
	// It can also be helpful to test with GODEBUG=clobberfree=1 and
	// with and without doubleCheckMalloc and doubleCheckReusable enabled.
	stressMultiple := 10
	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		stressMultiple = 1
	}

	return func(t *testing.T) {
		// alloc heap-allocates a zeroed T and verifies it really is zeroed.
		alloc := func() *T {
			// Force heap alloc, plus some light validation of zeroed memory.
			t.Helper()
			p := Escape(new(T))
			var zero T
			if *p != zero {
				t.Fatalf("allocator returned non-zero memory: %v", *p)
			}
			return p
		}

		// free explicitly frees p, first checking it is still zeroed
		// (the tests never write to allocated objects).
		free := func(p *T) {
			t.Helper()
			var zero T
			if *p != zero {
				t.Fatalf("found non-zero memory before freegc (tests do not modify memory): %v", *p)
			}
			runtime.Freegc(unsafe.Pointer(p), unsafe.Sizeof(*p), noscan)
		}

		t.Run("basic-free", func(t *testing.T) {
			// Test that freeing a live heap object doesn't crash.
			for range 100 {
				p := alloc()
				free(p)
			}
		})

		t.Run("stack-free", func(t *testing.T) {
			// Test that freeing a stack object doesn't crash.
			for range 100 {
				var x [32]byte
				var y [32]*int
				runtime.Freegc(unsafe.Pointer(&x), unsafe.Sizeof(x), true)  // noscan
				runtime.Freegc(unsafe.Pointer(&y), unsafe.Sizeof(y), false) // !noscan
			}
		})

		// Check our allocations. These tests rely on the
		// current implementation treating a re-used object
		// as not adding to the allocation counts seen
		// by testing.AllocsPerRun. (This is not the desired
		// long-term behavior, but it is the current behavior and
		// makes these tests convenient).

		t.Run("allocs-baseline", func(t *testing.T) {
			// Baseline result without any explicit free.
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					_ = p
				}
			})
			if allocs < 100 {
				// TODO(thepudds): we get exactly 100 for almost all the tests, but investigate why
				// ~101 allocs for TestFreegc/ptrs=true/size=32KiB-8.
				t.Fatalf("expected >=100 allocations, got %v", allocs)
			}
		})

		t.Run("allocs-with-free", func(t *testing.T) {
			// Same allocations, but now using explicit free so that
			// no allocs get reported. (Again, not the desired long-term behavior).
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		t.Run("free-multiple", func(t *testing.T) {
			// Multiple allocations outstanding before explicitly freeing,
			// but still within the limit of our smallest free list size
			// so that no allocs are reported. (Again, not long-term behavior).
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			const maxOutstanding = 20
			s := make([]*T, 0, maxOutstanding)
			allocs := testing.AllocsPerRun(100*stressMultiple, func() {
				s = s[:0]
				for range maxOutstanding {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		if runtime.GOARCH == "wasm" {
			// TODO(thepudds): for wasm, double-check if just slow, vs. some test logic problem,
			// vs. something else. It might have been wasm was slowest with tests that spawn
			// many goroutines, which might be expected for wasm. This skip might no longer be
			// needed now that we have tuned test execution time more, or perhaps wasm should just
			// always run in short mode, which might also let us remove this skip.
			t.Skip("skipping remaining freegc tests, was timing out on wasm")
		}

		t.Run("free-many", func(t *testing.T) {
			// Confirm we are graceful if we have more freed elements at once
			// than the max free list size.
			s := make([]*T, 0, 1000)
			iterations := stressMultiple * stressMultiple // currently 1 (-short) or 100
			for range iterations {
				s = s[:0]
				for range 1000 {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			}
		})

		t.Run("duplicate-check", func(t *testing.T) {
			// A simple duplicate allocation test. We track what should be the set
			// of live pointers in a map across a series of allocs and frees,
			// and fail if a live pointer value is returned by an allocation.
			// TODO: maybe add randomness? allow more live pointers? do across goroutines?
			live := make(map[uintptr]bool)
			for i := range 100 * stressMultiple {
				var s []*T
				// Alloc 10 times, tracking the live pointer values.
				for j := range 10 {
					p := alloc()
					uptr := uintptr(unsafe.Pointer(p))
					if live[uptr] {
						t.Fatalf("found duplicate pointer (0x%x). i: %d j: %d", uptr, i, j)
					}
					live[uptr] = true
					s = append(s, p)
				}
				// Explicitly free those pointers, removing them from the live map.
				for k := range s {
					p := s[k]
					s[k] = nil
					uptr := uintptr(unsafe.Pointer(p))
					free(p)
					delete(live, uptr)
				}
			}
		})

		t.Run("free-other-goroutine", func(t *testing.T) {
			// Use explicit free, but the free happens on a different goroutine than the alloc.
			// This also lightly simulates how the free code sees P migration or flushing
			// the mcache, assuming we have > 1 P. (Not using testing.AllocsPerRun here).
			iterations := 10 * stressMultiple * stressMultiple // currently 10 (-short) or 1000
			for _, capacity := range []int{2} {
				for range iterations {
					ch := make(chan *T, capacity)
					var wg sync.WaitGroup
					// Two consumer goroutines free whatever the producer sends.
					for range 2 {
						wg.Add(1)
						go func() {
							defer wg.Done()
							for p := range ch {
								free(p)
							}
						}()
					}
					for range 100 {
						p := alloc()
						ch <- p
					}
					close(ch)
					wg.Wait()
				}
			}
		})

		t.Run("many-goroutines", func(t *testing.T) {
			// Allocate across multiple goroutines, freeing on the same goroutine.
			// TODO: probably remove the duplicate checking here; not that useful.
			counts := []int{1, 2, 4, 8, 10 * stressMultiple}
			for _, goroutines := range counts {
				var wg sync.WaitGroup
				for range goroutines {
					wg.Add(1)
					go func() {
						defer wg.Done()
						live := make(map[uintptr]bool)
						for range 100 * stressMultiple {
							p := alloc()
							uptr := uintptr(unsafe.Pointer(p))
							if live[uptr] {
								panic("TestFreeLive: found duplicate pointer")
							}
							live[uptr] = true
							free(p)
							delete(live, uptr)
						}
					}()
				}
				wg.Wait()
			}
		})

		t.Run("assist-credit", func(t *testing.T) {
			// Allocate and free using the same span class repeatedly while
			// verifying it results in a net zero change in assist credit.
			// This helps double-check our manipulation of the assist credit
			// during mallocgc/freegc, including in cases when there is
			// internal fragmentation when the requested mallocgc size is
			// smaller than the size class.
			//
			// See https://go.dev/cl/717520 for some additional discussion,
			// including how we can deliberately cause the test to fail currently
			// if we purposefully introduce some assist credit bugs.
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip this test at this point in the stack; later CL has
				// integration with sizespecializedmalloc.
				t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping assist credit test with runtime.freegc disabled")
			}

			// Use a background goroutine to continuously run the GC.
			done := make(chan struct{})
			defer close(done)
			go func() {
				for {
					select {
					case <-done:
						return
					default:
						runtime.GC()
					}
				}
			}()

			// If making changes related to this test, consider testing locally with
			// larger counts, like 100K or 1M.
			counts := []int{1, 2, 10, 100 * stressMultiple}
			// Dropping down to GOMAXPROCS=1 might help reduce noise.
			defer GOMAXPROCS(GOMAXPROCS(1))
			size := int64(unsafe.Sizeof(*new(T)))
			for _, count := range counts {
				// Start by forcing a GC to reset this g's assist credit
				// and perhaps help us get a cleaner measurement of GC cycle count.
				runtime.GC()
				for i := range count {
					// We disable preemption to reduce other code's ability to adjust this g's
					// assist credit or otherwise change things while we are measuring.
					Acquirem()

					// We do two allocations per loop, with the second allocation being
					// the one we measure. The first allocation tries to ensure at least one
					// reusable object on the mspan's free list when we do our measured allocation.
					p := alloc()
					free(p)

					// Now do our primary allocation of interest, bracketed by measurements.
					// We measure more than we strictly need (to log details in case of a failure).
					creditStart := AssistCredit()
					blackenStart := GcBlackenEnable()
					p = alloc()
					blackenAfterAlloc := GcBlackenEnable()
					creditAfterAlloc := AssistCredit()
					free(p)
					blackenEnd := GcBlackenEnable()
					creditEnd := AssistCredit()

					Releasem()
					GoschedIfBusy()

					// Alloc followed by free must leave assist credit unchanged.
					delta := creditEnd - creditStart
					if delta != 0 {
						t.Logf("assist credit non-zero delta: %d", delta)
						t.Logf("\t| size: %d i: %d count: %d", size, i, count)
						t.Logf("\t| credit before: %d credit after: %d", creditStart, creditEnd)
						t.Logf("\t| alloc delta: %d free delta: %d",
							creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc)
						t.Logf("\t| gcBlackenEnable (start / after alloc / end): %v/%v/%v",
							blackenStart, blackenAfterAlloc, blackenEnd)
						t.FailNow()
					}
				}
			}
		})
	}
}
   596  
   597  func TestPageCacheLeak(t *testing.T) {
   598  	defer GOMAXPROCS(GOMAXPROCS(1))
   599  	leaked := PageCachePagesLeaked()
   600  	if leaked != 0 {
   601  		t.Fatalf("found %d leaked pages in page caches", leaked)
   602  	}
   603  }
   604  
   605  func TestPhysicalMemoryUtilization(t *testing.T) {
   606  	got := runTestProg(t, "testprog", "GCPhys")
   607  	want := "OK\n"
   608  	if got != want {
   609  		t.Fatalf("expected %q, but got %q", want, got)
   610  	}
   611  }
   612  
   613  func TestScavengedBitsCleared(t *testing.T) {
   614  	var mismatches [128]BitsMismatch
   615  	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
   616  		t.Errorf("uncleared scavenged bits")
   617  		for _, m := range mismatches[:n] {
   618  			t.Logf("\t@ address 0x%x", m.Base)
   619  			t.Logf("\t|  got: %064b", m.Got)
   620  			t.Logf("\t| want: %064b", m.Want)
   621  		}
   622  		t.FailNow()
   623  	}
   624  }
   625  
// acLink is a 1 MiB allocation used by TestArenaCollision to consume heap
// arena space quickly.
type acLink struct {
	x [1 << 20]byte
}

// arenaCollisionSink keeps acLink allocations reachable so they are not
// collected while TestArenaCollision runs.
var arenaCollisionSink []*acLink
   631  
// TestArenaCollision re-executes itself in a subprocess (guarded by the
// TEST_ARENA_COLLISION env var), maps memory over the runtime's next arena
// hints to force collisions, then allocates and checks that the heap never
// lands in the reserved regions.
func TestArenaCollision(t *testing.T) {
	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		// Parent process: re-run just this test in a child with the
		// env var set, then inspect its combined output.
		cmd := testenv.CleanCmdEnv(exec.Command(testenv.Executable(t), "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	// Child process from here on.
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})

		hint, ok := NextArenaHint()
		if !ok {
			// We're out of arena hints. There's not much we can do now except give up.
			// This might happen for a number of reasons, like if there's just something
			// else already mapped in the address space where we put our hints. This is
			// a bit more common than it used to be thanks to heap base randomization.
			t.Skip("ran out of arena hints")
		}

		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		for {
			if next, ok := NextArenaHint(); !ok {
				t.Skip("ran out of arena hints")
			} else if next != hint {
				// The runtime moved past the reserved hint.
				break
			}
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
   699  
   700  func BenchmarkMalloc8(b *testing.B) {
   701  	for i := 0; i < b.N; i++ {
   702  		p := new(int64)
   703  		Escape(p)
   704  	}
   705  }
   706  
   707  func BenchmarkMalloc16(b *testing.B) {
   708  	for i := 0; i < b.N; i++ {
   709  		p := new([2]int64)
   710  		Escape(p)
   711  	}
   712  }
   713  
   714  func BenchmarkMalloc32(b *testing.B) {
   715  	for i := 0; i < b.N; i++ {
   716  		p := new([4]int64)
   717  		Escape(p)
   718  	}
   719  }
   720  
   721  func BenchmarkMallocTypeInfo8(b *testing.B) {
   722  	for i := 0; i < b.N; i++ {
   723  		p := new(struct {
   724  			p [8 / unsafe.Sizeof(uintptr(0))]*int
   725  		})
   726  		Escape(p)
   727  	}
   728  }
   729  
   730  func BenchmarkMallocTypeInfo16(b *testing.B) {
   731  	for i := 0; i < b.N; i++ {
   732  		p := new(struct {
   733  			p [16 / unsafe.Sizeof(uintptr(0))]*int
   734  		})
   735  		Escape(p)
   736  	}
   737  }
   738  
   739  func BenchmarkMallocTypeInfo32(b *testing.B) {
   740  	for i := 0; i < b.N; i++ {
   741  		p := new(struct {
   742  			p [32 / unsafe.Sizeof(uintptr(0))]*int
   743  		})
   744  		Escape(p)
   745  	}
   746  }
   747  
// LargeStruct is a pointer-bearing struct (16 slice headers) used by
// BenchmarkMallocLargeStruct to measure larger scannable allocations.
type LargeStruct struct {
	x [16][]byte
}
   751  
   752  func BenchmarkMallocLargeStruct(b *testing.B) {
   753  	for i := 0; i < b.N; i++ {
   754  		p := make([]LargeStruct, 2)
   755  		Escape(p)
   756  	}
   757  }
   758  
// n controls how many goroutines the goroutine benchmarks below spawn.
var n = flag.Int("n", 1000, "number of goroutines")
   760  
// BenchmarkGoroutineSelect measures GC cost with *n goroutines each blocked
// in a two-case select on its data channel and a shared quit channel
// (quit is never closed here; the goroutines exit when benchHelper closes
// their data channels).
func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}
   777  
   778  func BenchmarkGoroutineBlocking(b *testing.B) {
   779  	read := func(ch chan struct{}) {
   780  		for {
   781  			if _, ok := <-ch; !ok {
   782  				return
   783  			}
   784  		}
   785  	}
   786  	benchHelper(b, *n, read)
   787  }
   788  
// BenchmarkGoroutineForRange measures GC cost with *n goroutines each
// blocked in a for-range channel receive; they exit when benchHelper
// closes their channels.
func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}
   796  
// benchHelper measures GC time in the presence of n goroutines, each
// blocked reading its own buffered channel via read. Every benchmark
// iteration wakes each goroutine once, sleeps briefly so they park again,
// and then times a single GC; only the GC itself is inside the timer.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		// Wake every goroutine (buffered send never blocks here).
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		// Give the goroutines time to consume and block again.
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Shut down the reader goroutines and let them exit.
	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}
   824  
   825  func BenchmarkGoroutineIdle(b *testing.B) {
   826  	quit := make(chan struct{})
   827  	fn := func() {
   828  		<-quit
   829  	}
   830  	for i := 0; i < *n; i++ {
   831  		go fn()
   832  	}
   833  
   834  	GC()
   835  	b.ResetTimer()
   836  
   837  	for i := 0; i < b.N; i++ {
   838  		GC()
   839  	}
   840  
   841  	b.StopTimer()
   842  	close(quit)
   843  	time.Sleep(10 * time.Millisecond)
   844  }
   845  
   846  func TestMkmalloc(t *testing.T) {
   847  	testenv.MustHaveGoRun(t)
   848  	testenv.MustHaveExternalNetwork(t) // To download the golang.org/x/tools dependency.
   849  	output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput()
   850  	t.Logf("test output:\n%s", output)
   851  	if err != nil {
   852  		t.Errorf("_mkmalloc tests failed: %v", err)
   853  	}
   854  }
   855  
   856  func TestScanAllocIssue77573(t *testing.T) {
   857  	if asan.Enabled {
   858  		t.Skip("extra allocations with -asan causes this to fail")
   859  	}
   860  	verifyScanAlloc := func(t *testing.T, f func(), expectSize uintptr) {
   861  		runtime.Acquirem()
   862  		defer runtime.Releasem()
   863  		for i := 0; i < 100; i++ {
   864  			before := runtime.GetScanAlloc()
   865  			f()
   866  			after := runtime.GetScanAlloc()
   867  			// When mcache refills a new mspan, scanAlloc will be set to 0.
   868  			// As a result, the calculated value is not the scanAlloc of the allocated object, just retry again.
   869  			if after > before {
   870  				actualSize := after - before
   871  				if actualSize != expectSize {
   872  					t.Errorf("wrong GC Scan Alloc Size:\nwant %+v\ngot  %+v", expectSize, actualSize)
   873  				}
   874  				return
   875  			}
   876  		}
   877  		t.Error("always refill, it still fails after running multiple times")
   878  	}
   879  	t.Run("heap slice ([]*int, 1)", func(t *testing.T) {
   880  		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 1)) }, goarch.PtrSize)
   881  	})
   882  	t.Run("heap slice ([]*int, 2)", func(t *testing.T) {
   883  		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 2)) }, 2*goarch.PtrSize)
   884  	})
   885  	t.Run("heap slice ([]*int, 3)", func(t *testing.T) {
   886  		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 3)) }, 3*goarch.PtrSize)
   887  	})
   888  }
   889  
// End of file. ("View as plain text" go.dev page footer converted to a comment
// so it is not mistaken for code.)