Source file
src/runtime/malloc_test.go
1
2
3
4
5 package runtime_test
6
7 import (
8 "flag"
9 "fmt"
10 "internal/asan"
11 "internal/goarch"
12 "internal/race"
13 "internal/testenv"
14 "os"
15 "os/exec"
16 "reflect"
17 "runtime"
18 . "runtime"
19 "strings"
20 "sync"
21 "sync/atomic"
22 "testing"
23 "time"
24 "unsafe"
25 )
26
// testMemStatsCount counts runs of TestMemStats in this process.
// Cumulative fields (TotalAlloc, NumGC, ...) grow across repeated runs,
// so the upper-bound checks in TestMemStats are relaxed after run one.
var testMemStatsCount int
28
// TestMemStats sanity-checks runtime.MemStats: after a forced GC every
// field must pass its per-field validators (non-zero and/or below a
// generous upper bound), and the cross-field invariants must hold
// (Sys is the sum of the XxxSys fields, HeapSys = HeapIdle + HeapInuse,
// LastGC matches the newest PauseEnd entry, and the pause ring buffers
// are consistent with NumGC).
func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Force at least one GC so GC-related fields (NumGC, LastGC,
	// pause buffers) are populated before we check them.
	GC()

	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error if the field is its type's zero value.
	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le returns a validator that the (numeric) field is below thresh.
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// The bounds are only meaningful on the first run; later
			// runs may legitimately have accumulated past them.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq returns a validator that the field equals x exactly.
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}

	// Per-field validators. A nil entry means the field exists but is
	// checked structurally below (or not at all). Note HeapIdle,
	// HeapReleased, and PauseTotalNs may legitimately be zero.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk MemStats by reflection so that a newly added field without
	// an entry in the table above is flagged as an error.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	// Sys is documented to be the sum of the XxxSys fields.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer indexed by GC count; (NumGC+255)
	// mod 256 is the slot of the most recent GC, whose end time must
	// equal LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// The pause ring hasn't wrapped yet: the recorded pauses must
		// sum to the total, and unused slots must still be zero.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The ring has wrapped; older pauses were overwritten, so the
		// running total can only be >= the sum of what remains.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
141
// TestStringConcatenationAllocs verifies that concatenating a string
// literal with a converted byte slice costs exactly one allocation
// (the result string); the temporary string(b) must not allocate.
func TestStringConcatenationAllocs(t *testing.T) {
	allocs := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := range b {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})

	if allocs != 1 {
		t.Fatalf("want 1 allocation, got %v", allocs)
	}
}
158
159 func TestTinyAlloc(t *testing.T) {
160 if runtime.Raceenabled {
161 t.Skip("tinyalloc suppressed when running in race mode")
162 }
163 if asan.Enabled {
164 t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
165 }
166 const N = 16
167 var v [N]unsafe.Pointer
168 for i := range v {
169 v[i] = unsafe.Pointer(new(byte))
170 }
171
172 chunks := make(map[uintptr]bool, N)
173 for _, p := range v {
174 chunks[uintptr(p)&^7] = true
175 }
176
177 if len(chunks) == N {
178 t.Fatal("no bytes allocated within the same 8-byte chunk")
179 }
180 }
181
// obj12 holds 12 bytes of field data (a uint64 followed by a uint32);
// it is used by TestTinyAllocIssue37262 to check that tiny-allocated
// objects keep field a 64-bit aligned for atomic access.
type obj12 struct {
	a uint64
	b uint32
}
186
// TestTinyAllocIssue37262 reproduces issue 37262: an object carved out
// of a tiny-allocator block must leave its uint64 field 8-byte aligned,
// otherwise the atomic store below faults on platforms that require
// aligned 64-bit atomics.
func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}

	// Start from a settled heap so the tiny-slot probing below is
	// predictable.
	runtime.GC()
	runtime.GC()

	// Pin to the current M so all allocations below come from the same
	// mcache and therefore the same tiny block.
	runtime.Acquirem()

	// Burn 1-byte allocations until one lands at offset 0xf of a
	// 16-byte tiny block — i.e. the block is exhausted and the next
	// tiny allocation must start a fresh one.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Skew the fresh tiny block by a 4-byte allocation, so a naive
	// allocator would place the next object at a misaligned offset.
	runtime.Escape(new(uint32))

	// Allocate the test object; the tiny allocator must still align it
	// so that field a is 64-bit aligned.
	tinyObj12 := runtime.Escape(new(obj12))

	// Faults here (on alignment-sensitive platforms) if a is misaligned.
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}
238
239
240 func TestFreegc(t *testing.T) {
241 tests := []struct {
242 size string
243 f func(noscan bool) func(*testing.T)
244 noscan bool
245 }{
246
247 {"size=16", testFreegc[[16]byte], true},
248 {"size=17", testFreegc[[17]byte], true},
249 {"size=64", testFreegc[[64]byte], true},
250 {"size=500", testFreegc[[500]byte], true},
251 {"size=512", testFreegc[[512]byte], true},
252 {"size=4096", testFreegc[[4096]byte], true},
253 {"size=20000", testFreegc[[20000]byte], true},
254 {"size=32KiB-8", testFreegc[[1<<15 - 8]byte], true},
255 }
256
257
258
259
260 for _, tt := range tests {
261 runtime.GC()
262 t.Run(fmt.Sprintf("gc=yes/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
263 }
264 runtime.GC()
265
266 if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
267 return
268 }
269
270
271
272 for _, tt := range tests {
273 t.Run(fmt.Sprintf("gc=no/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
274 }
275 runtime.GC()
276 }
277
// testFreegc builds the per-size subtest suite for runtime.Freegc.
// T is the payload type (an array type fixing the allocation size);
// noscan says whether the memory is pointer-free. The returned func
// runs subtests covering basic free, stack-address free, allocation
// reuse, duplicate detection, cross-goroutine free, and GC assist
// credit accounting.
func testFreegc[T comparable](noscan bool) func(*testing.T) {
	// Scale iteration counts down when freegc is disabled, in short
	// mode, or under the race detector, where the stress runs are
	// either pointless or too slow.
	stressMultiple := 10
	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		stressMultiple = 1
	}

	return func(t *testing.T) {
		// alloc heap-allocates a T (Escape defeats stack allocation)
		// and verifies the allocator returned zeroed memory.
		alloc := func() *T {
			t.Helper()
			p := Escape(new(T))
			var zero T
			if *p != zero {
				t.Fatalf("allocator returned non-zero memory: %v", *p)
			}
			return p
		}

		// free hands p back via runtime.Freegc, first checking the
		// memory is still zero (the tests never write to it).
		free := func(p *T) {
			t.Helper()
			var zero T
			if *p != zero {
				t.Fatalf("found non-zero memory before freegc (tests do not modify memory): %v", *p)
			}
			runtime.Freegc(unsafe.Pointer(p), unsafe.Sizeof(*p), noscan)
		}

		t.Run("basic-free", func(t *testing.T) {
			// Simple alloc/free pairs must not crash or corrupt.
			for range 100 {
				p := alloc()
				free(p)
			}
		})

		t.Run("stack-free", func(t *testing.T) {
			// Freegc on stack addresses must be tolerated (presumably
			// treated as a no-op — behavior defined by the runtime).
			for range 100 {
				var x [32]byte
				var y [32]*int
				runtime.Freegc(unsafe.Pointer(&x), unsafe.Sizeof(x), true)
				runtime.Freegc(unsafe.Pointer(&y), unsafe.Sizeof(y), false)
			}
		})

		t.Run("allocs-baseline", func(t *testing.T) {
			// Without frees, every alloc() must count as a heap
			// allocation — this validates the measurement itself.
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					_ = p
				}
			})
			if allocs < 100 {
				t.Fatalf("expected >=100 allocations, got %v", allocs)
			}
		})

		t.Run("allocs-with-free", func(t *testing.T) {
			// With a free after every alloc, memory should be reused
			// and the measured allocation count drop to zero.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		t.Run("free-multiple", func(t *testing.T) {
			// Batch up to 20 outstanding objects before freeing; reuse
			// should still bring measured allocations to zero.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			const maxOutstanding = 20
			s := make([]*T, 0, maxOutstanding)
			allocs := testing.AllocsPerRun(100*stressMultiple, func() {
				s = s[:0]
				for range maxOutstanding {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		if runtime.GOARCH == "wasm" {
			// The stress subtests below are too slow on wasm.
			t.Skip("skipping remaining freegc tests, was timing out on wasm")
		}

		t.Run("free-many", func(t *testing.T) {
			// Larger batches (1000 outstanding) purely as a stress /
			// crash test; no allocation counting here.
			s := make([]*T, 0, 1000)
			iterations := stressMultiple * stressMultiple
			for range iterations {
				s = s[:0]
				for range 1000 {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			}
		})

		t.Run("duplicate-check", func(t *testing.T) {
			// The allocator must never hand out an address that is
			// still live (i.e. freed memory must not be reused while
			// a pointer to it is outstanding).
			live := make(map[uintptr]bool)
			for i := range 100 * stressMultiple {
				var s []*T

				for j := range 10 {
					p := alloc()
					uptr := uintptr(unsafe.Pointer(p))
					if live[uptr] {
						t.Fatalf("found duplicate pointer (0x%x). i: %d j: %d", uptr, i, j)
					}
					live[uptr] = true
					s = append(s, p)
				}

				for k := range s {
					p := s[k]
					s[k] = nil
					uptr := uintptr(unsafe.Pointer(p))
					free(p)
					delete(live, uptr)
				}
			}
		})

		t.Run("free-other-goroutine", func(t *testing.T) {
			// Objects allocated here are freed on two consumer
			// goroutines — frees need not happen on the allocating P.
			iterations := 10 * stressMultiple * stressMultiple
			for _, capacity := range []int{2} {
				for range iterations {
					ch := make(chan *T, capacity)
					var wg sync.WaitGroup
					for range 2 {
						wg.Add(1)
						go func() {
							defer wg.Done()
							for p := range ch {
								free(p)
							}
						}()
					}
					for range 100 {
						p := alloc()
						ch <- p
					}
					close(ch)
					wg.Wait()
				}
			}
		})

		t.Run("many-goroutines", func(t *testing.T) {
			// Concurrent alloc/free on up to 10*stressMultiple
			// goroutines, each independently checking for duplicate
			// live pointers.
			counts := []int{1, 2, 4, 8, 10 * stressMultiple}
			for _, goroutines := range counts {
				var wg sync.WaitGroup
				for range goroutines {
					wg.Add(1)
					go func() {
						defer wg.Done()
						live := make(map[uintptr]bool)
						for range 100 * stressMultiple {
							p := alloc()
							uptr := uintptr(unsafe.Pointer(p))
							if live[uptr] {
								panic("TestFreeLive: found duplicate pointer")
							}
							live[uptr] = true
							free(p)
							delete(live, uptr)
						}
					}()
				}
				wg.Wait()
			}
		})

		t.Run("assist-credit", func(t *testing.T) {
			// An alloc immediately followed by a free of the same
			// object must leave the goroutine's GC assist credit
			// unchanged, even while GCs run concurrently.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping assist credit test with runtime.freegc disabled")
			}

			// Hammer GCs in the background so some iterations observe
			// an in-progress mark phase (gcBlackenEnable set).
			done := make(chan struct{})
			defer close(done)
			go func() {
				for {
					select {
					case <-done:
						return
					default:
						runtime.GC()
					}
				}
			}()

			counts := []int{1, 2, 10, 100 * stressMultiple}

			// GOMAXPROCS(1) keeps this goroutine and its credit on a
			// single P for the duration (restored on exit).
			defer GOMAXPROCS(GOMAXPROCS(1))
			size := int64(unsafe.Sizeof(*new(T)))
			for _, count := range counts {
				runtime.GC()
				for i := range count {
					// Pin to the M so the credit we read belongs to
					// the same context that allocates and frees.
					Acquirem()

					// Warm-up pair so the measured pair below hits a
					// steady-state allocation path.
					p := alloc()
					free(p)

					// Measure credit and mark-phase state around one
					// alloc/free pair.
					creditStart := AssistCredit()
					blackenStart := GcBlackenEnable()
					p = alloc()
					blackenAfterAlloc := GcBlackenEnable()
					creditAfterAlloc := AssistCredit()
					free(p)
					blackenEnd := GcBlackenEnable()
					creditEnd := AssistCredit()

					Releasem()
					GoschedIfBusy()

					delta := creditEnd - creditStart
					if delta != 0 {
						t.Logf("assist credit non-zero delta: %d", delta)
						t.Logf("\t| size: %d i: %d count: %d", size, i, count)
						t.Logf("\t| credit before: %d credit after: %d", creditStart, creditEnd)
						t.Logf("\t| alloc delta: %d free delta: %d",
							creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc)
						t.Logf("\t| gcBlackenEnable (start / after alloc / end): %v/%v/%v",
							blackenStart, blackenAfterAlloc, blackenEnd)
						t.FailNow()
					}
				}
			}
		})
	}
}
596
597 func TestPageCacheLeak(t *testing.T) {
598 defer GOMAXPROCS(GOMAXPROCS(1))
599 leaked := PageCachePagesLeaked()
600 if leaked != 0 {
601 t.Fatalf("found %d leaked pages in page caches", leaked)
602 }
603 }
604
605 func TestPhysicalMemoryUtilization(t *testing.T) {
606 got := runTestProg(t, "testprog", "GCPhys")
607 want := "OK\n"
608 if got != want {
609 t.Fatalf("expected %q, but got %q", want, got)
610 }
611 }
612
613 func TestScavengedBitsCleared(t *testing.T) {
614 var mismatches [128]BitsMismatch
615 if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
616 t.Errorf("uncleared scavenged bits")
617 for _, m := range mismatches[:n] {
618 t.Logf("\t@ address 0x%x", m.Base)
619 t.Logf("\t| got: %064b", m.Got)
620 t.Logf("\t| want: %064b", m.Want)
621 }
622 t.FailNow()
623 }
624 }
625
// acLink is a 1 MiB object; TestArenaCollision allocates these to force
// the heap to grow into new address space quickly.
type acLink struct {
	x [1 << 20]byte
}
629
// arenaCollisionSink keeps TestArenaCollision's allocations reachable
// so the GC cannot reclaim them while the test is probing arena hints.
var arenaCollisionSink []*acLink
631
// TestArenaCollision checks that the heap grows past address-space
// regions that are already reserved: it re-executes itself in a child
// process (for an unpolluted address space), reserves memory at the
// runtime's next arena hints, then allocates until the heap moves past
// each hint, verifying no allocation lands in a reserved region.
func TestArenaCollision(t *testing.T) {
	// Parent branch: spawn the child with TEST_ARENA_COLLISION=1 and
	// judge its combined output.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(testenv.Executable(t), "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// In race mode the runtime gives up after enough hint
			// collisions; that specific failure message is expected.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	// Child branch from here on.
	disallowed := [][2]uintptr{}

	// Drop all but the first 3 arena hints so the test exercises the
	// fallback path quickly.
	KeepNArenaHints(3)

	for i := 0; i < 5; i++ {
		// Reserve the address range the runtime would use next, so the
		// runtime's own mapping attempt there must collide.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})

		hint, ok := NextArenaHint()
		if !ok {
			// Out of hints — can't continue probing; not a failure.
			t.Skip("ran out of arena hints")
		}

		// Allocate until the runtime's next hint changes, i.e. until
		// the heap has grown past the reserved region.
		for {
			if next, ok := NextArenaHint(); !ok {
				t.Skip("ran out of arena hints")
			} else if next != hint {
				break
			}
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)

			// No allocation may fall inside a region we reserved.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
699
700 func BenchmarkMalloc8(b *testing.B) {
701 for i := 0; i < b.N; i++ {
702 p := new(int64)
703 Escape(p)
704 }
705 }
706
707 func BenchmarkMalloc16(b *testing.B) {
708 for i := 0; i < b.N; i++ {
709 p := new([2]int64)
710 Escape(p)
711 }
712 }
713
714 func BenchmarkMalloc32(b *testing.B) {
715 for i := 0; i < b.N; i++ {
716 p := new([4]int64)
717 Escape(p)
718 }
719 }
720
721 func BenchmarkMallocTypeInfo8(b *testing.B) {
722 for i := 0; i < b.N; i++ {
723 p := new(struct {
724 p [8 / unsafe.Sizeof(uintptr(0))]*int
725 })
726 Escape(p)
727 }
728 }
729
730 func BenchmarkMallocTypeInfo16(b *testing.B) {
731 for i := 0; i < b.N; i++ {
732 p := new(struct {
733 p [16 / unsafe.Sizeof(uintptr(0))]*int
734 })
735 Escape(p)
736 }
737 }
738
739 func BenchmarkMallocTypeInfo32(b *testing.B) {
740 for i := 0; i < b.N; i++ {
741 p := new(struct {
742 p [32 / unsafe.Sizeof(uintptr(0))]*int
743 })
744 Escape(p)
745 }
746 }
747
// LargeStruct holds 16 slice headers (48 words of pointer-bearing
// data); BenchmarkMallocLargeStruct allocates pairs of these to measure
// larger scannable allocations.
type LargeStruct struct {
	x [16][]byte
}
751
752 func BenchmarkMallocLargeStruct(b *testing.B) {
753 for i := 0; i < b.N; i++ {
754 p := make([]LargeStruct, 2)
755 Escape(p)
756 }
757 }
758
// n controls how many goroutines the BenchmarkGoroutine* benchmarks
// create (settable via the -n test flag).
var n = flag.Int("n", 1000, "number of goroutines")
760
761 func BenchmarkGoroutineSelect(b *testing.B) {
762 quit := make(chan struct{})
763 read := func(ch chan struct{}) {
764 for {
765 select {
766 case _, ok := <-ch:
767 if !ok {
768 return
769 }
770 case <-quit:
771 return
772 }
773 }
774 }
775 benchHelper(b, *n, read)
776 }
777
778 func BenchmarkGoroutineBlocking(b *testing.B) {
779 read := func(ch chan struct{}) {
780 for {
781 if _, ok := <-ch; !ok {
782 return
783 }
784 }
785 }
786 benchHelper(b, *n, read)
787 }
788
789 func BenchmarkGoroutineForRange(b *testing.B) {
790 read := func(ch chan struct{}) {
791 for range ch {
792 }
793 }
794 benchHelper(b, *n, read)
795 }
796
// benchHelper drives the BenchmarkGoroutine* benchmarks. It starts n
// goroutines, each running read on its own 1-buffered channel; per
// benchmark iteration it wakes every goroutine with one message, lets
// them settle, and times a single GC. Only the GC is inside the timed
// region — the wakeups and sleeps are excluded via StopTimer/StartTimer.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			// NOTE(review): every ch was assigned above, so this nil
			// guard looks vestigial — confirm before removing.
			if ch != nil {
				ch <- struct{}{}
			}
		}
		// Give the woken goroutines time to run before timing the GC.
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Shut down the reader goroutines and let them exit.
	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}
824
825 func BenchmarkGoroutineIdle(b *testing.B) {
826 quit := make(chan struct{})
827 fn := func() {
828 <-quit
829 }
830 for i := 0; i < *n; i++ {
831 go fn()
832 }
833
834 GC()
835 b.ResetTimer()
836
837 for i := 0; i < b.N; i++ {
838 GC()
839 }
840
841 b.StopTimer()
842 close(quit)
843 time.Sleep(10 * time.Millisecond)
844 }
845
846 func TestMkmalloc(t *testing.T) {
847 testenv.MustHaveGoRun(t)
848 testenv.MustHaveExternalNetwork(t)
849 output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput()
850 t.Logf("test output:\n%s", output)
851 if err != nil {
852 t.Errorf("_mkmalloc tests failed: %v", err)
853 }
854 }
855
// TestScanAllocIssue77573 checks the accounting of scannable allocation
// bytes (issue 77573): allocating a []*int of k elements must increase
// the scan-alloc counter by exactly k pointer-sizes. Iterations where
// the counter does not increase (presumably a span refill absorbed the
// delta) are retried up to 100 times.
func TestScanAllocIssue77573(t *testing.T) {
	if asan.Enabled {
		t.Skip("extra allocations with -asan causes this to fail")
	}
	// verifyScanAlloc runs f and requires the first observed increase
	// of the scan-alloc counter to be exactly expectSize bytes.
	verifyScanAlloc := func(t *testing.T, f func(), expectSize uintptr) {
		// Pin to the M so both counter reads see the same context.
		runtime.Acquirem()
		defer runtime.Releasem()
		for i := 0; i < 100; i++ {
			before := runtime.GetScanAlloc()
			f()
			after := runtime.GetScanAlloc()

			// Only iterations where the counter moved are conclusive;
			// otherwise retry.
			if after > before {
				actualSize := after - before
				if actualSize != expectSize {
					t.Errorf("wrong GC Scan Alloc Size:\nwant %+v\ngot %+v", expectSize, actualSize)
				}
				return
			}
		}
		t.Error("always refill, it still fails after running multiple times")
	}
	t.Run("heap slice ([]*int, 1)", func(t *testing.T) {
		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 1)) }, goarch.PtrSize)
	})
	t.Run("heap slice ([]*int, 2)", func(t *testing.T) {
		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 2)) }, 2*goarch.PtrSize)
	})
	t.Run("heap slice ([]*int, 3)", func(t *testing.T) {
		verifyScanAlloc(t, func() { runtime.Escape(make([]*int, 3)) }, 3*goarch.PtrSize)
	})
}
889
View as plain text