src/runtime/gc_test.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

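// Aside (illustrative, not from the original test): the deferred call above
// uses the save-and-restore idiom. debug.SetGCPercent returns the previous
// setting, so the inner call switches to GOGC=1 for the test and the deferred
// outer call restores the old value on exit. A hedged sketch of the same
// pattern using a hypothetical helper:
//
//	func withAggressiveGC(t *testing.T) {
//		old := debug.SetGCPercent(1) // GC after ~1% heap growth
//		t.Cleanup(func() { debug.SetGCPercent(old) })
//	}
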
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

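// Aside (illustrative, not from the original test): MemStats.PauseNs is a
// circular buffer of the most recent 256 GC stop-the-world pause times, and
// the most recent entry lives at index (NumGC+255)%256, which is what the
// test above reads. Illustrative use:
//
//	var ms runtime.MemStats
//	runtime.ReadMemStats(&ms)
//	lastPause := ms.PauseNs[(ms.NumGC+255)%256] // nanoseconds
//	_ = lastPause
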
var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that compiler can chew these huge types even on weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit platforms, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Check whether periodic GCs have run yet.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so we
	// add invalidptr=0 to avoid the badPointer check.
	// See issue https://golang.org/issues/49613
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move;
	// old, being a uintptr, should not have.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// This was a real failure.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddles things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if want != got {
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

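// Aside (illustrative, not from the original test): GCTestIsReachable is a
// test-only runtime export that runs a GC and reports which of its pointer
// arguments remained reachable, as a bitmask with bit i corresponding to
// argument i. Only the even-indexed pointers are also stored in half, which
// is kept alive past the call, so the expected mask sets exactly the even
// bits (0b0101...01 over 16 bits).
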
var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Print the pointer as a uintptr so the argument
			// itself doesn't escape.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

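// Aside (illustrative, not from the original test): logDiff recursively walks
// two reflect.Values of the same type and logs every leaf field that differs,
// labeling each with its path, e.g. "MemStats.HeapAlloc" or
// "MemStats.PauseNs[3]". A hedged usage sketch:
//
//	var a, b runtime.MemStats
//	logDiff(t, "MemStats", reflect.ValueOf(a), reflect.ValueOf(b))
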
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

func applyGCLoad(b *testing.B) func() {
	// Apply load to the runtime with maxProcs-1 background goroutines
	// and leave one P free for the benchmark itself. With only one P
	// there is no way to run the load and the measurement concurrently,
	// so the caller is skipped in that case.
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Collect individual ReadMemStats latencies here.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats repeatedly while the load runs and record
	// how long each call takes.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Give the background GC load time to run between samples so
		// each measurement reflects latency under steady load.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Stop the timer before tearing down the load; shutting down the
	// background goroutines is heavyweight and shouldn't be timed.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

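// Aside (illustrative, not from the original test): with the sorted slice,
// latencies[len(latencies)*N/100] is a simple nearest-rank percentile; with
// 1000 samples, for example, p99-ns reports the 990th-smallest latency.
// Reporting 0 for ns/op, B/op, and allocs/op overrides testing's built-in
// per-op metrics, which would otherwise be dominated by the sleep in the
// loop, so the percentile metrics carry the meaningful results.
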
func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large heap.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

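// Aside (illustrative, not from the original test): the copy above operates on
// a slice of pointers, so it should go through the runtime's bulk write
// barrier (one barrier covering the whole block) rather than a per-element
// barrier, which is the path this benchmark is intended to exercise.
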
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}