Source file src/runtime/malloc_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"internal/asan"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x any) error {
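		// Interface equality compares dynamic type and value, and the
		// reflect-constructed zero value has the same dynamic type as x,
		// so this compares x against the zero value of its own type.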
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased and HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

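	// PauseEnd is a circular buffer of recent GC pause end times;
	// the most recent entry is at index (NumGC+255)%len(PauseEnd).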
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
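	// (The byte slice does not escape, and the compiler can perform the
	// string conversion for the concatenation without a separate heap
	// allocation, so the resulting string is the single allocation.)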
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

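	// Group the pointers by 8-byte chunk by masking off the low three
	// address bits. The tiny allocator packs multiple small noscan
	// allocations into a single block, so some of the 1-byte allocations
	// above should land in the same chunk.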
	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

type obj12 struct {
	a uint64
	b uint32
}

func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}
	// Try to cause an alignment access fault
	// by atomically accessing the first 64-bit
	// value of a tiny-allocated object.
	// See issue 37262 for details.

	// GC twice, once to reach a stable heap state
	// and again to make sure we finish the sweep phase.
	runtime.GC()
	runtime.GC()

	// Disable preemption so we stay on one P's tiny allocator and
	// nothing else allocates from it.
	runtime.Acquirem()

	// Make 1-byte allocations until we get a fresh tiny slot.
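	// (An address whose low four bits are 0xf is the last byte of a
	// 16-byte tiny block, so that block is exhausted and the next tiny
	// allocation will start a fresh, 16-byte-aligned slot.)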
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the current
	// tiny slot is partially filled.
	runtime.Escape(new(uint32))

	// Create a 12-byte object, which fits into the
	// tiny slot. If it actually gets placed there,
	// then the field "a" will be improperly aligned
	// for atomic access on 32-bit architectures.
	// This won't be true if issue 36606 gets resolved.
	tinyObj12 := runtime.Escape(new(obj12))

	// Try to atomically access "tinyObj12.a".
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}

// TestFreegc does basic testing of explicit frees.
func TestFreegc(t *testing.T) {
	tests := []struct {
		size   string
		f      func(noscan bool) func(*testing.T)
		noscan bool
	}{
		// Types without pointers.
		{"size=16", testFreegc[[16]byte], true}, // smallest we support currently
		{"size=17", testFreegc[[17]byte], true},
		{"size=64", testFreegc[[64]byte], true},
		{"size=500", testFreegc[[500]byte], true},
		{"size=512", testFreegc[[512]byte], true},
		{"size=4096", testFreegc[[4096]byte], true},
		{"size=20000", testFreegc[[20000]byte], true},       // not a power of 2 or a size class boundary
		{"size=32KiB-8", testFreegc[[1<<15 - 8]byte], true}, // max noscan small object for 64-bit
	}

	// Run the tests twice unless we are in -short mode or otherwise saving test time.
	// First, run while manually calling runtime.GC between tests to slightly increase
	// isolation (perhaps making problems more reproducible).
	for _, tt := range tests {
		runtime.GC()
		t.Run(fmt.Sprintf("gc=yes/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()

	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		return
	}

	// Again, but without manually calling runtime.GC in the loop (perhaps less isolation might
	// trigger problems).
	for _, tt := range tests {
		t.Run(fmt.Sprintf("gc=no/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()
}

func testFreegc[T comparable](noscan bool) func(*testing.T) {
	// We use stressMultiple to influence the duration of the tests.
	// When testing freegc changes, stressMultiple can be increased locally
	// to test longer or in some cases with more goroutines.
	// It can also be helpful to test with GODEBUG=clobberfree=1 and
	// with and without doubleCheckMalloc and doubleCheckReusable enabled.
	stressMultiple := 10
	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		stressMultiple = 1
	}

	return func(t *testing.T) {
		alloc := func() *T {
			// Force heap alloc, plus some light validation of zeroed memory.
			t.Helper()
			p := Escape(new(T))
			var zero T
			if *p != zero {
				t.Fatalf("allocator returned non-zero memory: %v", *p)
			}
			return p
		}

		free := func(p *T) {
			t.Helper()
			var zero T
			if *p != zero {
				t.Fatalf("found non-zero memory before freegc (tests do not modify memory): %v", *p)
			}
			runtime.Freegc(unsafe.Pointer(p), unsafe.Sizeof(*p), noscan)
		}

		t.Run("basic-free", func(t *testing.T) {
			// Test that freeing a live heap object doesn't crash.
			for range 100 {
				p := alloc()
				free(p)
			}
		})

		t.Run("stack-free", func(t *testing.T) {
			// Test that freeing a stack object doesn't crash.
			for range 100 {
				var x [32]byte
				var y [32]*int
				runtime.Freegc(unsafe.Pointer(&x), unsafe.Sizeof(x), true)  // noscan
				runtime.Freegc(unsafe.Pointer(&y), unsafe.Sizeof(y), false) // !noscan
			}
		})

		// Check our allocations. These tests rely on the
		// current implementation treating a re-used object
		// as not adding to the allocation counts seen
		// by testing.AllocsPerRun. (This is not the desired
		// long-term behavior, but it is the current behavior and
		// makes these tests convenient).

		t.Run("allocs-baseline", func(t *testing.T) {
			// Baseline result without any explicit free.
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					_ = p
				}
			})
			if allocs < 100 {
				// TODO(thepudds): we get exactly 100 for almost all the tests, but investigate why
				// ~101 allocs for TestFreegc/ptrs=true/size=32KiB-8.
				t.Fatalf("expected >=100 allocations, got %v", allocs)
			}
		})

		t.Run("allocs-with-free", func(t *testing.T) {
			// Same allocations, but now using explicit free so that
			// no allocs get reported. (Again, not the desired long-term behavior).
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		t.Run("free-multiple", func(t *testing.T) {
			// Multiple allocations outstanding before explicitly freeing,
			// but still within the limit of our smallest free list size
			// so that no allocs are reported. (Again, not long-term behavior).
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip at this point in the stack for size-specialized malloc
				// with !noscan. Additional integration with sizespecializedmalloc is in a later CL.
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			const maxOutstanding = 20
			s := make([]*T, 0, maxOutstanding)
			allocs := testing.AllocsPerRun(100*stressMultiple, func() {
				s = s[:0]
				for range maxOutstanding {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		if runtime.GOARCH == "wasm" {
			// TODO(thepudds): for wasm, double-check whether this is just slow, a test logic
			// problem, or something else. It might be that wasm was slowest with tests that
			// spawn many goroutines, which might be expected for wasm. This skip might no
			// longer be needed now that we have tuned test execution time more, or perhaps
			// wasm should just always run in short mode, which might also let us remove
			// this skip.
			t.Skip("skipping remaining freegc tests, was timing out on wasm")
		}

		t.Run("free-many", func(t *testing.T) {
			// Confirm we degrade gracefully if we have more freed elements at once
			// than the max free list size.
			s := make([]*T, 0, 1000)
			iterations := stressMultiple * stressMultiple // currently 1 (-short) or 100
			for range iterations {
				s = s[:0]
				for range 1000 {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			}
		})

		t.Run("duplicate-check", func(t *testing.T) {
			// A simple duplicate allocation test. We track what should be the set
			// of live pointers in a map across a series of allocs and frees,
			// and fail if a live pointer value is returned by an allocation.
			// TODO: maybe add randomness? allow more live pointers? do across goroutines?
			live := make(map[uintptr]bool)
			for i := range 100 * stressMultiple {
				var s []*T
				// Alloc 10 times, tracking the live pointer values.
				for j := range 10 {
					p := alloc()
					uptr := uintptr(unsafe.Pointer(p))
					if live[uptr] {
						t.Fatalf("found duplicate pointer (0x%x). i: %d j: %d", uptr, i, j)
					}
					live[uptr] = true
					s = append(s, p)
				}
				// Explicitly free those pointers, removing them from the live map.
				for k := range s {
					p := s[k]
					s[k] = nil
					uptr := uintptr(unsafe.Pointer(p))
					free(p)
					delete(live, uptr)
				}
			}
		})

		t.Run("free-other-goroutine", func(t *testing.T) {
			// Use explicit free, but the free happens on a different goroutine than the alloc.
			// This also lightly simulates how the free code sees P migration or flushing
			// the mcache, assuming we have > 1 P. (Not using testing.AllocsPerRun here).
			iterations := 10 * stressMultiple * stressMultiple // currently 10 (-short) or 1000
			for _, capacity := range []int{2} {
				for range iterations {
					ch := make(chan *T, capacity)
					var wg sync.WaitGroup
					for range 2 {
						wg.Add(1)
						go func() {
							defer wg.Done()
							for p := range ch {
								free(p)
							}
						}()
					}
					for range 100 {
						p := alloc()
						ch <- p
					}
					close(ch)
					wg.Wait()
				}
			}
		})

		t.Run("many-goroutines", func(t *testing.T) {
			// Allocate across multiple goroutines, freeing on the same goroutine.
			// TODO: probably remove the duplicate checking here; not that useful.
			counts := []int{1, 2, 4, 8, 10 * stressMultiple}
			for _, goroutines := range counts {
				var wg sync.WaitGroup
				for range goroutines {
					wg.Add(1)
					go func() {
						defer wg.Done()
						live := make(map[uintptr]bool)
						for range 100 * stressMultiple {
							p := alloc()
							uptr := uintptr(unsafe.Pointer(p))
							if live[uptr] {
								panic("TestFreegc/many-goroutines: found duplicate pointer")
							}
							live[uptr] = true
							free(p)
							delete(live, uptr)
						}
					}()
				}
				wg.Wait()
			}
		})

		t.Run("assist-credit", func(t *testing.T) {
			// Allocate and free using the same span class repeatedly while
			// verifying it results in a net zero change in assist credit.
			// This helps double-check our manipulation of the assist credit
			// during mallocgc/freegc, including cases with internal
			// fragmentation, where the requested mallocgc size is smaller
			// than the size class.
			//
			// See https://go.dev/cl/717520 for some additional discussion,
			// including how we can currently cause the test to fail by
			// deliberately introducing assist credit bugs.
			if SizeSpecializedMallocEnabled && !noscan {
				// TODO(thepudds): skip this test at this point in the stack; later CL has
				// integration with sizespecializedmalloc.
				t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping assist credit test with runtime.freegc disabled")
			}

			// Use a background goroutine to continuously run the GC.
			done := make(chan struct{})
			defer close(done)
			go func() {
				for {
					select {
					case <-done:
						return
					default:
						runtime.GC()
					}
				}
			}()

			// If making changes related to this test, consider testing locally with
			// larger counts, like 100K or 1M.
			counts := []int{1, 2, 10, 100 * stressMultiple}
			// Dropping down to GOMAXPROCS=1 might help reduce noise.
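			// (GOMAXPROCS returns the previous setting, so the line below
			// both drops GOMAXPROCS to 1 for the rest of the test and
			// restores the old value when the deferred call runs.)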
			defer GOMAXPROCS(GOMAXPROCS(1))
			size := int64(unsafe.Sizeof(*new(T)))
			for _, count := range counts {
				// Start by forcing a GC to reset this g's assist credit
				// and perhaps help us get a cleaner measurement of GC cycle count.
				runtime.GC()
				for i := range count {
					// We disable preemption to reduce other code's ability to adjust this g's
					// assist credit or otherwise change things while we are measuring.
					Acquirem()

					// We do two allocations per loop, with the second allocation being
					// the one we measure. The first allocation tries to ensure at least one
					// reusable object on the mspan's free list when we do our measured allocation.
					p := alloc()
					free(p)

					// Now do our primary allocation of interest, bracketed by measurements.
					// We measure more than we strictly need (to log details in case of a failure).
					creditStart := AssistCredit()
					blackenStart := GcBlackenEnable()
					p = alloc()
					blackenAfterAlloc := GcBlackenEnable()
					creditAfterAlloc := AssistCredit()
					free(p)
					blackenEnd := GcBlackenEnable()
					creditEnd := AssistCredit()

					Releasem()
					GoschedIfBusy()

					delta := creditEnd - creditStart
					if delta != 0 {
						t.Logf("assist credit non-zero delta: %d", delta)
						t.Logf("\t| size: %d i: %d count: %d", size, i, count)
						t.Logf("\t| credit before: %d credit after: %d", creditStart, creditEnd)
						t.Logf("\t| alloc delta: %d free delta: %d",
							creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc)
						t.Logf("\t| gcBlackenEnable (start / after alloc / end): %v/%v/%v",
							blackenStart, blackenAfterAlloc, blackenEnd)
						t.FailNow()
					}
				}
			}
		})
	}
}

func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t|  got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

func TestArenaCollision(t *testing.T) {
	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(testenv.Executable(t), "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
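	// (We loop 5 times with only 3 hints kept so that the final
	// iterations exercise the fallback path where the runtime must
	// choose new addresses on its own.)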
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})

		hint, ok := NextArenaHint()
		if !ok {
			// We're out of arena hints. There's not much we can do now except give up.
			// This might happen for a number of reasons, like if there's just something
			// else already mapped in the address space where we put our hints. This is
			// a bit more common than it used to be thanks to heap base randomization.
			t.Skip("ran out of arena hints")
		}

		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		for {
			if next, ok := NextArenaHint(); !ok {
				t.Skip("ran out of arena hints")
			} else if next != hint {
				break
			}
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

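// Escape (defined in the runtime's export_test.go) forces its argument
// to escape to the heap, so the allocations in the benchmarks below
// cannot be elided or stack-allocated by the compiler.
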
func BenchmarkMalloc8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(int64)
		Escape(p)
	}
}

func BenchmarkMalloc16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		Escape(p)
	}
}

func BenchmarkMalloc32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([4]int64)
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [32 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		Escape(p)
	}
}

var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

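	// Only the GC itself is timed: the timer starts immediately before
	// each GC call and stops right after, so the channel sends and
	// sleeps that churn goroutine state are excluded from the measurement.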
	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}

func TestMkmalloc(t *testing.T) {
	testenv.MustHaveGoRun(t)
	testenv.MustHaveExternalNetwork(t) // To download the golang.org/x/tools dependency.
	output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput()
	t.Logf("test output:\n%s", output)
	if err != nil {
		t.Errorf("_mkmalloc tests failed: %v", err)
	}
}
