Source file src/runtime/arena_test.go

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"reflect"
	. "runtime"
	"runtime/debug"
	"testing"
	"time"
	"unsafe"
)

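// The types below span a range of sizes and pointer layouts: small structs
// with and without pointers, medium arrays of scalars and pointers with even
// and odd lengths, and large types that exceed a single user arena chunk.
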
type smallScalar struct {
	X uintptr
}
type smallPointer struct {
	X *smallPointer
}
type smallPointerMix struct {
	A *smallPointer
	B byte
	C *smallPointer
	D [11]byte
}
type mediumScalarEven [8192]byte
type mediumScalarOdd [3321]byte
type mediumPointerEven [1024]*smallPointer
type mediumPointerOdd [1023]*smallPointer

type largeScalar [UserArenaChunkBytes + 1]byte
type largePointer [UserArenaChunkBytes/unsafe.Sizeof(&smallPointer{}) + 1]*smallPointer

func TestUserArena(t *testing.T) {
	if Clobberfree() {
		// This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1.
		t.Skip("triggers SEGV with GODEBUG=clobberfree=1")
	}

	// Set GOMAXPROCS to 2 so we don't run too many of these
	// tests in parallel.
	defer GOMAXPROCS(GOMAXPROCS(2))

	// Start a subtest so that we can clean up after any parallel tests within.
	t.Run("Alloc", func(t *testing.T) {
		ss := &smallScalar{5}
		runSubTestUserArenaNew(t, ss, true)

		sp := &smallPointer{new(smallPointer)}
		runSubTestUserArenaNew(t, sp, true)

		spm := &smallPointerMix{sp, 5, nil, [11]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}}
		runSubTestUserArenaNew(t, spm, true)

		mse := new(mediumScalarEven)
		for i := range mse {
			mse[i] = 121
		}
		runSubTestUserArenaNew(t, mse, true)

		mso := new(mediumScalarOdd)
		for i := range mso {
			mso[i] = 122
		}
		runSubTestUserArenaNew(t, mso, true)

		mpe := new(mediumPointerEven)
		for i := range mpe {
			mpe[i] = sp
		}
		runSubTestUserArenaNew(t, mpe, true)

		mpo := new(mediumPointerOdd)
		for i := range mpo {
			mpo[i] = sp
		}
		runSubTestUserArenaNew(t, mpo, true)

		ls := new(largeScalar)
		for i := range ls {
			ls[i] = 123
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, ls, false)

		lp := new(largePointer)
		for i := range lp {
			lp[i] = sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaNew(t, lp, false)

		sss := make([]smallScalar, 25)
		for i := range sss {
			sss[i] = smallScalar{12}
		}
		runSubTestUserArenaSlice(t, sss, true)

		mpos := make([]mediumPointerOdd, 5)
		for i := range mpos {
			mpos[i] = *mpo
		}
		runSubTestUserArenaSlice(t, mpos, true)

		sps := make([]smallPointer, UserArenaChunkBytes/unsafe.Sizeof(smallPointer{})+1)
		for i := range sps {
			sps[i] = *sp
		}
		// Not in parallel because we don't want to hold this large allocation live.
		runSubTestUserArenaSlice(t, sps, false)

		// Test zero-sized types.
		t.Run("struct{}", func(t *testing.T) {
			arena := NewUserArena()
			var x any
			x = (*struct{})(nil)
			arena.New(&x)
			if v := unsafe.Pointer(x.(*struct{})); v != ZeroBase {
				t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase)
			}
			arena.Free()
		})
		t.Run("[]struct{}", func(t *testing.T) {
			arena := NewUserArena()
			var sl []struct{}
			arena.Slice(&sl, 10)
			if v := unsafe.Pointer(&sl[0]); v != ZeroBase {
				t.Errorf("expected zero-sized type to be allocated as zerobase: got %x, want %x", v, ZeroBase)
			}
			arena.Free()
		})
		t.Run("[]int (cap 0)", func(t *testing.T) {
			arena := NewUserArena()
			var sl []int
			arena.Slice(&sl, 0)
			if len(sl) != 0 {
				t.Errorf("expected requested zero-sized slice to still have zero length: got %x, want 0", len(sl))
			}
			arena.Free()
		})
	})

	// Run a GC cycle to get any arenas off the quarantine list.
	GC()

	if n := GlobalWaitingArenaChunks(); n != 0 {
		t.Errorf("expected zero waiting arena chunks, found %d", n)
	}
}

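// runSubTestUserArenaNew allocates roughly one arena chunk's worth of values
// of type S with arena.New, writes *value into each, verifies that the data
// survives intact, and then frees the arena.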
func runSubTestUserArenaNew[S comparable](t *testing.T, value *S, parallel bool) {
	t.Run(reflect.TypeOf(value).Elem().Name(), func(t *testing.T) {
		if parallel {
			t.Parallel()
		}

		// Allocate and write data, enough to exhaust the arena.
		//
		// This is an underestimate, likely leaving some space in the arena. That's a good thing,
		// because it gives us coverage of boundary cases.
		n := int(UserArenaChunkBytes / unsafe.Sizeof(*value))
		if n == 0 {
			n = 1
		}

		// Create a new arena and do a bunch of operations on it.
		arena := NewUserArena()

		arenaValues := make([]*S, 0, n)
		for j := 0; j < n; j++ {
			var x any
			x = (*S)(nil)
			arena.New(&x)
			s := x.(*S)
			*s = *value
			arenaValues = append(arenaValues, s)
		}
		// Check integrity of allocated data.
		for _, s := range arenaValues {
			if *s != *value {
				t.Errorf("failed integrity check: got %#v, want %#v", *s, *value)
			}
		}

		// Release the arena.
		arena.Free()
	})
}

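// runSubTestUserArenaSlice is the slice counterpart of runSubTestUserArenaNew:
// it allocates roughly one arena chunk's worth of slices with arena.Slice,
// copies value into each, verifies the contents, and then frees the arena.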
func runSubTestUserArenaSlice[S comparable](t *testing.T, value []S, parallel bool) {
	t.Run("[]"+reflect.TypeOf(value).Elem().Name(), func(t *testing.T) {
		if parallel {
			t.Parallel()
		}

		// Allocate and write data, enough to exhaust the arena.
		//
		// This is an underestimate, likely leaving some space in the arena. That's a good thing,
		// because it gives us coverage of boundary cases.
		n := int(UserArenaChunkBytes / (unsafe.Sizeof(*new(S)) * uintptr(cap(value))))
		if n == 0 {
			n = 1
		}

		// Create a new arena and do a bunch of operations on it.
		arena := NewUserArena()

		arenaValues := make([][]S, 0, n)
		for j := 0; j < n; j++ {
			var sl []S
			arena.Slice(&sl, cap(value))
			copy(sl, value)
			arenaValues = append(arenaValues, sl)
		}
		// Check integrity of allocated data.
		for _, sl := range arenaValues {
			for i := range sl {
				got := sl[i]
				want := value[i]
				if got != want {
					t.Errorf("failed integrity check: got %#v, want %#v at index %d", got, want, i)
				}
			}
		}

		// Release the arena.
		arena.Free()
	})
}

func TestUserArenaLiveness(t *testing.T) {
	if Clobberfree() {
		// This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1.
		t.Skip("triggers SEGV with GODEBUG=clobberfree=1")
	}

	t.Run("Free", func(t *testing.T) {
		testUserArenaLiveness(t, false)
	})
	t.Run("Finalizer", func(t *testing.T) {
		testUserArenaLiveness(t, true)
	})
}

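// testUserArenaLiveness checks that a heap object referenced only from arena
// memory stays alive until the arena is released, either by an explicit Free
// call or by the arena's own finalizer, and becomes collectable afterwards.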
func testUserArenaLiveness(t *testing.T, useArenaFinalizer bool) {
	// Disable the GC so that there's zero chance we try doing anything arena related *during*
	// a mark phase, since otherwise a bunch of arenas could end up on the fault list.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	// Defensively ensure that any full arena chunks leftover from previous tests have been cleared.
	GC()
	GC()

	arena := NewUserArena()

	// Allocate a few pointer-ful but uninitialized objects so that later we can
	// place a reference to a heap object at a more interesting location.
	for i := 0; i < 3; i++ {
		var x any
		x = (*mediumPointerOdd)(nil)
		arena.New(&x)
	}

	var x any
	x = (*smallPointerMix)(nil)
	arena.New(&x)
	v := x.(*smallPointerMix)

	var safeToFinalize atomic.Bool
	var finalized atomic.Bool
	v.C = new(smallPointer)
	SetFinalizer(v.C, func(_ *smallPointer) {
		if !safeToFinalize.Load() {
			t.Error("finalized arena-referenced object unexpectedly")
		}
		finalized.Store(true)
	})

	// Make sure it stays alive.
	GC()
	GC()

	// In order to ensure the object can be freed, we now need to make sure to use
	// the entire arena. Exhaust the rest of the arena.

	for i := 0; i < int(UserArenaChunkBytes/unsafe.Sizeof(mediumScalarEven{})); i++ {
		var x any
		x = (*mediumScalarEven)(nil)
		arena.New(&x)
	}

	// Make sure it stays alive again.
	GC()
	GC()

	v = nil

	safeToFinalize.Store(true)
	if useArenaFinalizer {
		arena = nil

		// Try to queue the arena finalizer.
		GC()
		GC()

		// In order for the finalizer we actually want to run to execute,
		// we need to make sure this one runs first.
		if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
			t.Fatal("finalizer queue was never emptied")
		}
	} else {
		// Free the arena explicitly.
		arena.Free()
	}

	// Try to queue the object's finalizer that we set earlier.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Error("expected arena-referenced object to be finalized")
	}
}

func TestUserArenaClearsPointerBits(t *testing.T) {
	if Clobberfree() {
		// This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1.
		t.Skip("triggers SEGV with GODEBUG=clobberfree=1")
	}

	// This is a regression test for a serious issue wherein if pointer bits
	// aren't properly cleared, it's possible to allocate scalar data down
	// into a previously pointer-ful area, causing misinterpretation by the GC.

	// Create a large object, grab a pointer into it, and free it.
	x := new([8 << 20]byte)
	xp := uintptr(unsafe.Pointer(&x[124]))
	var finalized atomic.Bool
	SetFinalizer(x, func(_ *[8 << 20]byte) {
		finalized.Store(true)
	})

	// Write three chunks' worth of pointer data. Three gives us a
	// high likelihood that when we write two chunks' worth later, we'll
	// get the behavior we want.
	a := NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*3); i++ {
		var x any
		x = (*smallPointer)(nil)
		a.New(&x)
	}
	a.Free()

	// Recycle the arena chunks.
	GC()
	GC()

	a = NewUserArena()
	for i := 0; i < int(UserArenaChunkBytes/goarch.PtrSize*2); i++ {
		var x any
		x = (*smallScalar)(nil)
		a.New(&x)
		v := x.(*smallScalar)
		// Write a pointer that should not keep x alive.
		*v = smallScalar{xp}
	}
	KeepAlive(x)
	x = nil

	// Try to free x.
	GC()
	GC()

	if !BlockUntilEmptyFinalizerQueue(int64(2 * time.Second)) {
		t.Fatal("finalizer queue was never emptied")
	}
	if !finalized.Load() {
		t.Fatal("heap allocation kept alive through non-pointer reference")
	}

	// Clean up the arena.
	a.Free()
	GC()
	GC()
}

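// TestUserArenaCloneString checks that UserArenaClone copies strings whose
// data lives in an arena and returns strings backed by heap or static data
// unchanged.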
func TestUserArenaCloneString(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "abcdefghij"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Create a string, as, backed by the same memory as the byte slice, and
	// hence in the arena. This could be an arena API, but hasn't really been
	// needed yet.
	as := unsafe.String(&b[0], len(b))

	// Clone should make a copy of as, since it is in the arena.
	asCopy := UserArenaClone(as)
	if unsafe.StringData(as) == unsafe.StringData(asCopy) {
		t.Error("Clone did not make a copy")
	}

	// Clone should make a copy of subAs, since subAs is just part of as and so is in the arena.
	subAs := as[1:3]
	subAsCopy := UserArenaClone(subAs)
	if unsafe.StringData(subAs) == unsafe.StringData(subAsCopy) {
		t.Error("Clone did not make a copy")
	}
	if len(subAs) != len(subAsCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(subAs), len(subAsCopy))
	} else {
		for i := range subAs {
			if subAs[i] != subAsCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, subAs[i], subAsCopy[i])
			}
		}
	}

	// Clone should not make a copy of doubleAs, since doubleAs will be on the heap.
	doubleAs := as + as
	doubleAsCopy := UserArenaClone(doubleAs)
	if unsafe.StringData(doubleAs) != unsafe.StringData(doubleAsCopy) {
		t.Error("Clone should not have made a copy")
	}

	// Clone should not make a copy of s, since s is a static string.
	sCopy := UserArenaClone(s)
	if unsafe.StringData(s) != unsafe.StringData(sCopy) {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

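// TestUserArenaClonePointer checks that UserArenaClone copies a pointer to an
// arena-allocated value and returns a heap pointer unchanged.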
func TestUserArenaClonePointer(t *testing.T) {
	a := NewUserArena()

	// Clone should not make a copy of a heap-allocated smallScalar.
	x := Escape(new(smallScalar))
	xCopy := UserArenaClone(x)
	if unsafe.Pointer(x) != unsafe.Pointer(xCopy) {
		t.Errorf("Clone should not have made a copy: %#v -> %#v", x, xCopy)
	}

	// Clone should make a copy of an arena-allocated smallScalar.
	var i any
	i = (*smallScalar)(nil)
	a.New(&i)
	xArena := i.(*smallScalar)
	xArenaCopy := UserArenaClone(xArena)
	if unsafe.Pointer(xArena) == unsafe.Pointer(xArenaCopy) {
		t.Errorf("Clone should have made a copy: %#v -> %#v", xArena, xArenaCopy)
	}
	if *xArena != *xArenaCopy {
		t.Errorf("Clone made an incorrect copy: %#v -> %#v", *xArena, *xArenaCopy)
	}

	a.Free()
}

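// TestUserArenaCloneSlice checks that UserArenaClone copies slices backed by
// arena memory and returns heap-backed slices unchanged.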
func TestUserArenaCloneSlice(t *testing.T) {
	a := NewUserArena()

	// A static string (not on heap or arena)
	var s = "klmnopqrstuv"

	// Create a byte slice in the arena, initialize it with s
	var b []byte
	a.Slice(&b, len(s))
	copy(b, s)

	// Clone should make a copy of b, since it is in the arena.
	bCopy := UserArenaClone(b)
	if unsafe.Pointer(&b[0]) == unsafe.Pointer(&bCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", b, bCopy)
	}
	if len(b) != len(bCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(b), len(bCopy))
	} else {
		for i := range b {
			if b[i] != bCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, b[i], bCopy[i])
			}
		}
	}

	// Clone should make a copy of bSub, since bSub is just part of b and so is in the arena.
	bSub := b[1:3]
	bSubCopy := UserArenaClone(bSub)
	if unsafe.Pointer(&bSub[0]) == unsafe.Pointer(&bSubCopy[0]) {
		t.Errorf("Clone did not make a copy: %#v -> %#v", bSub, bSubCopy)
	}
	if len(bSub) != len(bSubCopy) {
		t.Errorf("Clone made an incorrect copy (bad length): %d -> %d", len(bSub), len(bSubCopy))
	} else {
		for i := range bSub {
			if bSub[i] != bSubCopy[i] {
				t.Errorf("Clone made an incorrect copy (data at index %d): %d -> %d", i, bSub[i], bSubCopy[i])
			}
		}
	}

	// Clone should not make a copy of bNotArena, since it will not be in an arena.
	bNotArena := make([]byte, len(s))
	copy(bNotArena, s)
	bNotArenaCopy := UserArenaClone(bNotArena)
	if unsafe.Pointer(&bNotArena[0]) != unsafe.Pointer(&bNotArenaCopy[0]) {
		t.Error("Clone should not have made a copy")
	}

	a.Free()
}

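// TestUserArenaClonePanic checks that UserArenaClone panics when it is passed
// a value it does not support, such as a plain (non-pointer) struct value.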
func TestUserArenaClonePanic(t *testing.T) {
	var s string
	func() {
		x := smallScalar{2}
		defer func() {
			if v := recover(); v != nil {
				s = v.(string)
			}
		}()
		UserArenaClone(x)
	}()
	if s == "" {
		t.Errorf("expected panic from Clone")
	}
}