Source file src/runtime/slice_test.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"fmt"
     9  	"internal/asan"
    10  	"internal/goexperiment"
    11  	"internal/msan"
    12  	"internal/race"
    13  	"internal/testenv"
    14  	"runtime"
    15  	"testing"
    16  )
    17  
    18  const N = 20
    19  
// BenchmarkMakeSliceCopy compares three idioms for duplicating a slice:
// make with the source's length then copy ("mallocmove"), make with a
// constant length then copy ("makecopy"), and append onto a nil slice
// ("nilappend"), each for byte, int, and pointer element types.
func BenchmarkMakeSliceCopy(b *testing.B) {
	const length = 32
	var bytes = make([]byte, 8*length)
	var ints = make([]int, length)
	var ptrs = make([]*byte, length)
	b.Run("mallocmove", func(b *testing.B) {
		b.Run("Byte", func(b *testing.B) {
			var x []byte
			for i := 0; i < b.N; i++ {
				x = make([]byte, len(bytes))
				copy(x, bytes)
			}
		})
		b.Run("Int", func(b *testing.B) {
			var x []int
			for i := 0; i < b.N; i++ {
				x = make([]int, len(ints))
				copy(x, ints)
			}
		})
		b.Run("Ptr", func(b *testing.B) {
			var x []*byte
			for i := 0; i < b.N; i++ {
				x = make([]*byte, len(ptrs))
				copy(x, ptrs)
			}

		})
	})
	b.Run("makecopy", func(b *testing.B) {
		b.Run("Byte", func(b *testing.B) {
			var x []byte
			for i := 0; i < b.N; i++ {
				x = make([]byte, 8*length)
				copy(x, bytes)
			}
		})
		b.Run("Int", func(b *testing.B) {
			var x []int
			for i := 0; i < b.N; i++ {
				x = make([]int, length)
				copy(x, ints)
			}
		})
		b.Run("Ptr", func(b *testing.B) {
			var x []*byte
			for i := 0; i < b.N; i++ {
				x = make([]*byte, length)
				copy(x, ptrs)
			}

		})
	})
	b.Run("nilappend", func(b *testing.B) {
		b.Run("Byte", func(b *testing.B) {
			var x []byte
			for i := 0; i < b.N; i++ {
				x = append([]byte(nil), bytes...)
				_ = x
			}
		})
		b.Run("Int", func(b *testing.B) {
			var x []int
			for i := 0; i < b.N; i++ {
				x = append([]int(nil), ints...)
				_ = x
			}
		})
		b.Run("Ptr", func(b *testing.B) {
			var x []*byte
			for i := 0; i < b.N; i++ {
				x = append([]*byte(nil), ptrs...)
				_ = x
			}
		})
	})
}
    97  
// Struct element types of increasing size (24, 32, and 40 bytes on
// 64-bit platforms), used by the make/grow benchmarks below to measure
// larger-than-word elements.
type (
	struct24 struct{ a, b, c int64 }
	struct32 struct{ a, b, c, d int64 }
	struct40 struct{ a, b, c, d, e int64 }
)
   103  
// BenchmarkMakeSlice measures make([]T, length, 2*length) for several
// element types and struct sizes.
func BenchmarkMakeSlice(b *testing.B) {
	const length = 2
	b.Run("Byte", func(b *testing.B) {
		var x []byte
		for i := 0; i < b.N; i++ {
			x = make([]byte, length, 2*length)
			_ = x
		}
	})
	b.Run("Int16", func(b *testing.B) {
		var x []int16
		for i := 0; i < b.N; i++ {
			x = make([]int16, length, 2*length)
			_ = x
		}
	})
	b.Run("Int", func(b *testing.B) {
		var x []int
		for i := 0; i < b.N; i++ {
			x = make([]int, length, 2*length)
			_ = x
		}
	})
	b.Run("Ptr", func(b *testing.B) {
		var x []*byte
		for i := 0; i < b.N; i++ {
			x = make([]*byte, length, 2*length)
			_ = x
		}
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("24", func(b *testing.B) {
			var x []struct24
			for i := 0; i < b.N; i++ {
				x = make([]struct24, length, 2*length)
				_ = x
			}
		})
		b.Run("32", func(b *testing.B) {
			var x []struct32
			for i := 0; i < b.N; i++ {
				x = make([]struct32, length, 2*length)
				_ = x
			}
		})
		b.Run("40", func(b *testing.B) {
			var x []struct40
			for i := 0; i < b.N; i++ {
				x = make([]struct40, length, 2*length)
				_ = x
			}
		})

	})
}
   159  
// BenchmarkGrowSlice measures growing a nil slice to 9 elements with a
// single bulk append, for several element types and struct sizes.
func BenchmarkGrowSlice(b *testing.B) {
	b.Run("Byte", func(b *testing.B) {
		x := make([]byte, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]byte(nil), x...)
		}
	})
	b.Run("Int16", func(b *testing.B) {
		x := make([]int16, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]int16(nil), x...)
		}
	})
	b.Run("Int", func(b *testing.B) {
		x := make([]int, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]int(nil), x...)
		}
	})
	b.Run("Ptr", func(b *testing.B) {
		x := make([]*byte, 9)
		for i := 0; i < b.N; i++ {
			_ = append([]*byte(nil), x...)
		}
	})
	b.Run("Struct", func(b *testing.B) {
		b.Run("24", func(b *testing.B) {
			x := make([]struct24, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct24(nil), x...)
			}
		})
		b.Run("32", func(b *testing.B) {
			x := make([]struct32, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct32(nil), x...)
			}
		})
		b.Run("40", func(b *testing.B) {
			x := make([]struct40, 9)
			for i := 0; i < b.N; i++ {
				_ = append([]struct40(nil), x...)
			}
		})

	})
}
   207  
// Global sinks that keep benchmark results reachable so the compiler
// cannot optimize the appends in BenchmarkExtendSlice away.
var (
	SinkIntSlice        []int
	SinkIntPointerSlice []*int
)
   212  
// BenchmarkExtendSlice measures extending a slice by bulk-appending a
// freshly made slice: the capped cases (cap halved via s[:0:length/2])
// force a growth on every append, while NoGrow keeps the full capacity
// so no reallocation is needed.
func BenchmarkExtendSlice(b *testing.B) {
	var length = 4 // Use a variable to prevent stack allocation of slices.
	b.Run("IntSlice", func(b *testing.B) {
		s := make([]int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length/2], make([]int, length)...)
		}
		SinkIntSlice = s
	})
	b.Run("PointerSlice", func(b *testing.B) {
		s := make([]*int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length/2], make([]*int, length)...)
		}
		SinkIntPointerSlice = s
	})
	b.Run("NoGrow", func(b *testing.B) {
		s := make([]int, 0, length)
		for i := 0; i < b.N; i++ {
			s = append(s[:0:length], make([]int, length)...)
		}
		SinkIntSlice = s
	})
}
   237  
// BenchmarkAppend measures appending N ints one at a time into a slice
// that already has sufficient capacity, truncating and reusing the same
// backing array each iteration.
func BenchmarkAppend(b *testing.B) {
	b.StopTimer()
	x := make([]int, 0, N)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		x = x[0:0]
		for j := 0; j < N; j++ {
			x = append(x, j)
		}
	}
}
   249  
// BenchmarkAppendGrowByte measures growing a byte slice from nil to
// 1<<20 elements via repeated single-element appends, including all the
// intermediate reallocations.
func BenchmarkAppendGrowByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var x []byte
		for j := 0; j < 1<<20; j++ {
			x = append(x, byte(j))
		}
	}
}
   258  
// BenchmarkAppendGrowString is like BenchmarkAppendGrowByte but with a
// pointer-containing element type (string), exercising growth with
// write barriers in play.
func BenchmarkAppendGrowString(b *testing.B) {
	var s string
	for i := 0; i < b.N; i++ {
		var x []string
		for j := 0; j < 1<<20; j++ {
			x = append(x, s)
		}
	}
}
   268  
// BenchmarkAppendSlice measures a bulk append of a small slice into a
// destination that already has enough capacity, for several source
// lengths.
func BenchmarkAppendSlice(b *testing.B) {
	for _, length := range []int{1, 4, 7, 8, 15, 16, 32} {
		b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
			x := make([]byte, 0, N)
			y := make([]byte, length)
			for i := 0; i < b.N; i++ {
				x = x[0:0]
				x = append(x, y...)
			}
		})
	}
}
   281  
// blackhole is a global sink that keeps the appends in
// BenchmarkAppendSliceLarge from being optimized away.
var (
	blackhole []byte
)
   285  
// BenchmarkAppendSliceLarge measures a bulk append of a large slice
// onto a nil slice (allocate + copy) for sizes from 1KiB to 1MiB.
func BenchmarkAppendSliceLarge(b *testing.B) {
	for _, length := range []int{1 << 10, 4 << 10, 16 << 10, 64 << 10, 256 << 10, 1024 << 10} {
		y := make([]byte, length)
		b.Run(fmt.Sprint(length, "Bytes"), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				blackhole = nil
				blackhole = append(blackhole, y...)
			}
		})
	}
}
   297  
// BenchmarkAppendStr measures appending a string onto a []byte with
// sufficient capacity, for several string lengths.
func BenchmarkAppendStr(b *testing.B) {
	for _, str := range []string{
		"1",
		"1234",
		"12345678",
		"1234567890123456",
		"12345678901234567890123456789012",
	} {
		b.Run(fmt.Sprint(len(str), "Bytes"), func(b *testing.B) {
			x := make([]byte, 0, N)
			for i := 0; i < b.N; i++ {
				x = x[0:0]
				x = append(x, str...)
			}
		})
	}
}
   315  
// BenchmarkAppendSpecialCase measures a hand-written fast path for
// append: when len < cap, extend by reslicing and store directly,
// falling back to append only when the slice is full.
func BenchmarkAppendSpecialCase(b *testing.B) {
	b.StopTimer()
	x := make([]int, 0, N)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		x = x[0:0]
		for j := 0; j < N; j++ {
			if len(x) < cap(x) {
				x = x[:len(x)+1]
				x[len(x)-1] = j
			} else {
				x = append(x, j)
			}
		}
	}
}
   332  
   333  var x []int
   334  
// f stores 3 into the current backing array of x and returns 2.
// TestSideEffectOrder passes it as an append argument to verify that
// append fully evaluates its arguments before writing the new elements.
func f() int {
	x[:1][0] = 3
	return 2
}
   339  
// TestSideEffectOrder checks argument evaluation order for append:
// f() writes 3 into x's backing array while being evaluated, but the
// appended values 1 and 2 must be what ends up in x[0] and x[1].
func TestSideEffectOrder(t *testing.T) {
	x = make([]int, 0, 10)
	x = append(x, 1, f())
	if x[0] != 1 || x[1] != 2 {
		t.Error("append failed: ", x[0], x[1])
	}
}
   347  
// TestAppendOverlap checks appending a slice onto an overlapping
// subslice of itself, where source and destination share a backing
// array and the destination pointer precedes the source pointer.
func TestAppendOverlap(t *testing.T) {
	x := []byte("1234")
	x = append(x[1:], x...) // p > q in runtime·appendslice.
	got := string(x)
	want := "2341234"
	if got != want {
		t.Errorf("overlap failed: got %q want %q", got, want)
	}
}
   357  
// BenchmarkCopy measures copy into a large byte buffer from both a
// []byte source and a string source, for several source lengths.
func BenchmarkCopy(b *testing.B) {
	for _, l := range []int{1, 2, 4, 8, 12, 16, 32, 128, 1024} {
		buf := make([]byte, 4096)
		b.Run(fmt.Sprint(l, "Byte"), func(b *testing.B) {
			s := make([]byte, l)
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s)
			}
			b.SetBytes(int64(n))
		})
		b.Run(fmt.Sprint(l, "String"), func(b *testing.B) {
			s := string(make([]byte, l))
			var n int
			for i := 0; i < b.N; i++ {
				n = copy(buf, s)
			}
			b.SetBytes(int64(n))
		})
	}
}
   379  
// Global sinks for BenchmarkAppendInPlace. Appending through a global
// makes the slice referred to by address, which (per the benchmark's
// doc comment below) is what triggers the in-place append optimization.
var (
	sByte []byte
	s1Ptr []uintptr
	s2Ptr [][2]uintptr
	s3Ptr [][3]uintptr
	s4Ptr [][4]uintptr
)
   387  
// BenchmarkAppendInPlace tests the performance of append
// when the result is being written back to the same slice.
// In order for the in-place optimization to occur,
// the slice must be referred to by address;
// using a global is an easy way to trigger that.
// We test the "grow" and "no grow" paths separately,
// but not the "normal" (occasionally grow) path,
// because it is a blend of the other two.
// We use small numbers and small sizes in an attempt
// to avoid benchmarking memory allocation and copying.
// We use scalars instead of pointers in an attempt
// to avoid benchmarking the write barriers.
// We benchmark four common sizes (byte, pointer, string/interface, slice),
// and one larger size.
func BenchmarkAppendInPlace(b *testing.B) {
	b.Run("NoGrow", func(b *testing.B) {
		// Start from a length-C slice; after any initial growth the
		// remaining appends have spare capacity (the no-grow path).
		const C = 128

		b.Run("Byte", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sByte = make([]byte, C)
				for j := 0; j < C; j++ {
					sByte = append(sByte, 0x77)
				}
			}
		})

		b.Run("1Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s1Ptr = make([]uintptr, C)
				for j := 0; j < C; j++ {
					s1Ptr = append(s1Ptr, 0x77)
				}
			}
		})

		b.Run("2Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s2Ptr = make([][2]uintptr, C)
				for j := 0; j < C; j++ {
					s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
				}
			}
		})

		b.Run("3Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s3Ptr = make([][3]uintptr, C)
				for j := 0; j < C; j++ {
					s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
				}
			}
		})

		b.Run("4Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s4Ptr = make([][4]uintptr, C)
				for j := 0; j < C; j++ {
					s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
				}
			}
		})

	})

	b.Run("Grow", func(b *testing.B) {
		const C = 5

		// Reslicing to cap after every append keeps len == cap, so each
		// append takes the growth path.
		b.Run("Byte", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sByte = make([]byte, 0)
				for j := 0; j < C; j++ {
					sByte = append(sByte, 0x77)
					sByte = sByte[:cap(sByte)]
				}
			}
		})

		b.Run("1Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s1Ptr = make([]uintptr, 0)
				for j := 0; j < C; j++ {
					s1Ptr = append(s1Ptr, 0x77)
					s1Ptr = s1Ptr[:cap(s1Ptr)]
				}
			}
		})

		b.Run("2Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s2Ptr = make([][2]uintptr, 0)
				for j := 0; j < C; j++ {
					s2Ptr = append(s2Ptr, [2]uintptr{0x77, 0x88})
					s2Ptr = s2Ptr[:cap(s2Ptr)]
				}
			}
		})

		b.Run("3Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s3Ptr = make([][3]uintptr, 0)
				for j := 0; j < C; j++ {
					s3Ptr = append(s3Ptr, [3]uintptr{0x77, 0x88, 0x99})
					s3Ptr = s3Ptr[:cap(s3Ptr)]
				}
			}
		})

		b.Run("4Ptr", func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				s4Ptr = make([][4]uintptr, 0)
				for j := 0; j < C; j++ {
					s4Ptr = append(s4Ptr, [4]uintptr{0x77, 0x88, 0x99, 0xAA})
					s4Ptr = s4Ptr[:cap(s4Ptr)]
				}
			}
		})

	})
}
   508  
// byteSlice builds an n-byte slice by appending one byte at a time to a
// nil slice and returns it. Marked noinline so TestAppendByteInLoop and
// BenchmarkAppendInLoop observe this function's own allocation behavior
// rather than an inlined/optimized variant.
//
//go:noinline
func byteSlice(n int) []byte {
	var r []byte
	for i := range n {
		r = append(r, byte(i))
	}
	return r
}
// TestAppendByteInLoop checks the allocation count and final capacity
// produced by an append-in-a-loop pattern (byteSlice) for various final
// sizes. Skipped when optimizations are off or under race/sanitizer
// instrumentation, since those change allocation behavior.
func TestAppendByteInLoop(t *testing.T) {
	testenv.SkipIfOptimizationOff(t)
	if race.Enabled {
		t.Skip("skipping in -race mode")
	}
	if asan.Enabled || msan.Enabled {
		t.Skip("skipping in sanitizer mode")
	}
	// Each case is {n, want allocs, want cap}.
	for _, test := range [][3]int{
		{0, 0, 0},
		{1, 1, 8},
		{2, 1, 8},
		{8, 1, 8},
		{9, 1, 16},
		{16, 1, 16},
		{17, 1, 24},
		{24, 1, 24},
		{25, 1, 32},
		{32, 1, 32},
		{33, 1, 64}, // If we up the stack buffer size from 32->64, this line and the next would become 48.
		{48, 1, 64},
		{49, 1, 64},
		{64, 1, 64},
		{65, 2, 128},
	} {
		n := test[0]
		want := test[1]
		wantCap := test[2]

		if goexperiment.RuntimeFreegc && n > 64 {
			// Only 1 allocation is expected to be reported.
			//
			// TODO(thepudds): consider a test export or similar that lets us more directly see
			// the count of freed objects, reused objects, and allocated objects. This could
			// be in advance of any future runtime/metrics or user-visible API, or perhaps
			// we could introduce the concept of build tag or debug flag controlled runtime/metrics
			// targeting people working on the runtime and compiler (but not formally supported).
			want = 1
		}

		var r []byte
		got := testing.AllocsPerRun(10, func() {
			r = byteSlice(n)
		})
		if got != float64(want) {
			t.Errorf("for size %d, got %f allocs want %d", n, got, want)
		}
		if cap(r) != wantCap {
			t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap)
		}
	}
}
   569  
// ptrSlice appends n nil pointers one at a time to a nil slice and
// stores the result through p. Marked noinline so TestAppendPtrInLoop
// observes this function's own allocation behavior.
//
//go:noinline
func ptrSlice(n int, p *[]*byte) {
	var r []*byte
	for range n {
		r = append(r, nil)
	}
	*p = r
}
// TestAppendPtrInLoop checks the allocation count and final capacity of
// an append-in-a-loop pattern with a pointer element type (ptrSlice).
// Expectations differ by pointer size, so there are separate tables for
// 64-bit and 32-bit platforms.
func TestAppendPtrInLoop(t *testing.T) {
	testenv.SkipIfOptimizationOff(t)
	if race.Enabled {
		t.Skip("skipping in -race mode")
	}
	if asan.Enabled || msan.Enabled {
		t.Skip("skipping in sanitizer mode")
	}
	// Each case is {n, want allocs, want cap}.
	var tests [][3]int
	if runtime.PtrSize == 8 {
		tests = [][3]int{
			{0, 0, 0},
			{1, 1, 1},
			{2, 1, 2},
			{3, 1, 3}, // This is the interesting case, allocates 24 bytes when before it was 32.
			{4, 1, 4},
			{5, 1, 8},
			{6, 1, 8},
			{7, 1, 8},
			{8, 1, 8},
			{9, 2, 16},
		}
	} else {
		tests = [][3]int{
			{0, 0, 0},
			{1, 1, 2},
			{2, 1, 2},
			{3, 1, 4},
			{4, 1, 4},
			{5, 1, 6}, // These two are also 24 bytes instead of 32.
			{6, 1, 6}, //
			{7, 1, 8},
			{8, 1, 8},
			{9, 1, 16},
			{10, 1, 16},
			{11, 1, 16},
			{12, 1, 16},
			{13, 1, 16},
			{14, 1, 16},
			{15, 1, 16},
			{16, 1, 16},
			{17, 2, 32},
		}
	}
	for _, test := range tests {
		n := test[0]
		want := test[1]
		wantCap := test[2]
		var r []*byte
		got := testing.AllocsPerRun(10, func() {
			ptrSlice(n, &r)
		})
		if got != float64(want) {
			t.Errorf("for size %d, got %f allocs want %d", n, got, want)
		}
		if cap(r) != wantCap {
			t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap)
		}
	}
}
   638  
// byteCapSlice is like byteSlice but also returns cap(r) computed
// inside the function, so callers can check that the capacity observed
// internally matches the returned slice. Marked noinline so tests
// observe this function's own allocation behavior.
//
//go:noinline
func byteCapSlice(n int) ([]byte, int) {
	var r []byte
	for i := range n {
		r = append(r, byte(i))
	}
	return r, cap(r)
}
// TestAppendByteCapInLoop is the byteCapSlice variant of
// TestAppendByteInLoop: it checks allocation counts and final capacity
// when the function also reads cap of the slice it builds.
func TestAppendByteCapInLoop(t *testing.T) {
	testenv.SkipIfOptimizationOff(t)
	if race.Enabled {
		t.Skip("skipping in -race mode")
	}
	if asan.Enabled || msan.Enabled {
		t.Skip("skipping in sanitizer mode")
	}
	// Each case is {n, want allocs, want cap}.
	for _, test := range [][3]int{
		{0, 0, 0},
		{1, 1, 8},
		{2, 1, 8},
		{8, 1, 8},
		{9, 1, 16},
		{16, 1, 16},
		{17, 1, 24},
		{24, 1, 24},
		{25, 1, 32},
		{32, 1, 32},
		{33, 1, 64},
		{48, 1, 64},
		{49, 1, 64},
		{64, 1, 64},
		{65, 2, 128},
	} {
		n := test[0]
		want := test[1]
		wantCap := test[2]

		if goexperiment.RuntimeFreegc && n > 64 {
			// Only 1 allocation is expected to be reported.
			want = 1
		}

		var r []byte
		got := testing.AllocsPerRun(10, func() {
			r, _ = byteCapSlice(n)
		})
		if got != float64(want) {
			t.Errorf("for size %d, got %f allocs want %d", n, got, want)
		}
		if cap(r) != wantCap {
			t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap)
		}
	}
}
   693  
   694  func TestAppendGeneric(t *testing.T) {
   695  	type I *int
   696  	r := testAppendGeneric[I](100)
   697  	if len(r) != 100 {
   698  		t.Errorf("bad length")
   699  	}
   700  }
   701  
// testAppendGeneric appends n zero values of E to a nil slice and
// returns it. Marked noinline so the generic append loop is observed
// as-is by TestAppendGeneric.
//
//go:noinline
func testAppendGeneric[E any](n int) []E {
	var r []E
	var z E
	for range n {
		r = append(r, z)
	}
	return r
}
   711  
   712  func appendSomeBytes(r []byte, s []byte) []byte {
   713  	for _, b := range s {
   714  		r = append(r, b)
   715  	}
   716  	return r
   717  }
   718  
// TestAppendOfArg checks that appending to a slice passed as an
// argument does not corrupt the caller's data: after calling
// appendSomeBytes on r and then on a second slice (to overwrite any
// stack-allocated append buffers used by the first call), r's original
// contents must be intact.
func TestAppendOfArg(t *testing.T) {
	r := make([]byte, 24)
	for i := 0; i < 24; i++ {
		r[i] = byte(i)
	}
	appendSomeBytes(r, []byte{25, 26, 27})
	// Do the same thing, trying to overwrite any
	// stack-allocated buffers used above.
	s := make([]byte, 24)
	for i := 0; i < 24; i++ {
		s[i] = 99
	}
	appendSomeBytes(s, []byte{99, 99, 99})
	// Check that we still have the right data.
	for i, b := range r {
		if b != byte(i) {
			t.Errorf("r[%d]=%d, want %d", i, b, byte(i))
		}
	}

}
   740  
// BenchmarkAppendInLoop measures byteSlice (building a slice from nil
// via single-byte appends) for several final sizes, reporting
// allocations per operation.
func BenchmarkAppendInLoop(b *testing.B) {
	for _, size := range []int{0, 1, 8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("%d", size),
			func(b *testing.B) {
				b.ReportAllocs()
				for b.Loop() {
					byteSlice(size)
				}
			})
	}
}
   752  
// TestMoveToHeapEarly is a compile-only check: copying x before the
// append loop causes a move2heap in the function's entry block, and the
// test verifies the compiler handles that shape.
func TestMoveToHeapEarly(t *testing.T) {
	// Just checking that this compiles.
	var x []int
	y := x // causes a move2heap in the entry block
	for range 5 {
		x = append(x, 5)
	}
	_ = y
}
   762  
// TestMoveToHeapCap checks that the capacity observed inside a function
// that builds a slice with an append loop matches cap of the slice it
// returns (i.e. the returned header and the one cap() saw agree).
func TestMoveToHeapCap(t *testing.T) {
	var c int
	r := func() []byte {
		var s []byte
		for i := range 10 {
			s = append(s, byte(i))
		}
		c = cap(s)
		return s
	}()
	if c != cap(r) {
		t.Errorf("got cap=%d, want %d", c, cap(r))
	}
	// Keep the result reachable via a global sink.
	sinkSlice = r
}
   778  
// runit calls f. Marked noinline so the call through it is not
// flattened away, keeping the closure call shape the tests rely on.
//
//go:noinline
func runit(f func()) {
	f()
}
   783  
// TestMoveToHeapClosure1 is like TestMoveToHeapCap, but the capacity is
// read through a closure passed to a noinline function, so the slice is
// captured by a closure that escapes.
func TestMoveToHeapClosure1(t *testing.T) {
	var c int
	r := func() []byte {
		var s []byte
		for i := range 10 {
			s = append(s, byte(i))
		}
		runit(func() {
			c = cap(s)
		})
		return s
	}()
	if c != cap(r) {
		t.Errorf("got cap=%d, want %d", c, cap(r))
	}
	sinkSlice = r
}
// TestMoveToHeapClosure2 is like TestMoveToHeapClosure1, but the
// capacity is read via an immediately invoked closure rather than one
// passed to another function.
func TestMoveToHeapClosure2(t *testing.T) {
	var c int
	r := func() []byte {
		var s []byte
		for i := range 10 {
			s = append(s, byte(i))
		}
		c = func() int {
			return cap(s)
		}()
		return s
	}()
	if c != cap(r) {
		t.Errorf("got cap=%d, want %d", c, cap(r))
	}
	sinkSlice = r
}
   818  
// buildClosure builds a 20-byte slice via an append loop and returns it
// together with a closure that validates the slice's contents. Marked
// noinline so the slice and closure construction happen in this frame.
//
//go:noinline
func buildClosure(t *testing.T) ([]byte, func()) {
	var s []byte
	for i := range 20 {
		s = append(s, byte(i))
	}
	c := func() {
		for i, b := range s {
			if b != byte(i) {
				t.Errorf("s[%d]=%d, want %d", i, b, i)
			}
		}
	}
	return s, c
}
   834  
// TestMoveToHeapClosure3 runs buildClosure's validation closure after
// clobbering the stack; the closure must still see the correct slice
// contents, i.e. the slice must not be left in dead stack memory.
func TestMoveToHeapClosure3(t *testing.T) {
	_, f := buildClosure(t)
	overwriteStack(0)
	f()
}
   840  
   841  //go:noinline
   842  func overwriteStack(n int) uint64 {
   843  	var x [100]uint64
   844  	for i := range x {
   845  		x[i] = 0xabcdabcdabcdabcd
   846  	}
   847  	return x[n]
   848  }
   849  
   850  var sinkSlice []byte
   851  

View as plain text