Source file: src/runtime/gc_test.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/asan"
	"internal/msan"
	"internal/race"
	"internal/testenv"
	"math/bits"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
	"weak"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

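// TestGcDeepNesting checks that a pointer stored at the bottom of a
// deeply nested array type survives a collection.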
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

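// TestGcMapIndirection stresses insertion into a map with large keys
// while the GC runs aggressively (GOGC=1).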
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

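// TestGcArraySlice builds a linked list whose nodes point into each
// other's inline buffers, collecting at every step, then verifies that
// no node was corrupted.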
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

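// TestGcRescan links nodes through both an embedded struct and a
// direct pointer, collecting at every step, then verifies the values
// reachable from the list are intact.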
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

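// hugeSink stays nil at run time; TestHugeGCInfo only needs the
// allocations guarded by it to compile, not to execute.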
var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can produce GC info
	// for very large types.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible the sysmon thread will be starved, so poll
	// for up to a second (200 * 5ms) rather than assuming the
	// GCs happen promptly.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Check whether a periodic GC has run yet.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	if asan.Enabled || msan.Enabled || race.Enabled {
		t.Skip("skipped test: checkptr mode catches the issue before getting to zombie reporting")
	}

	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so pass
	// invalidptr=0 to avoid the bad-pointer check.
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	if asan.Enabled {
		t.Skip("extra allocations with -asan causes this to fail; see #70079")
	}
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// copy to look at a specific function frame (this one).
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack copy; old is a
	// uintptr, so the copy should have left it unchanged.
	//
	// Capture new's value now, before anything else can move this
	// goroutine's stack again.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Sanity check: new should still point into the stack.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// The stack did not move; let the caller retry.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if got&want != want {
		// This is a serious bug - an object is live (due to the KeepAlive
		// call below), but isn't reported as such.
		t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
	}
	if bits.OnesCount64(got&^want) > 1 {
		// Note: we can occasionally have a value that is retained even though
		// it isn't live, due to conservative scanning of stack frames.
		// See issue 67204. For now, we allow a "slop" of 1 unintentionally
		// retained object.
		t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	if asan.Enabled {
		t.Skip("extra allocations cause this test to fail; see #70079")
	}
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

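// BenchmarkAllocation measures the throughput of small-object
// allocation from multiple goroutines.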
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

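// The helpers below exercise interface type switches, assertions, and
// comparisons.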
func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// mheap_.pagesInUse tracks how many pages belong to in-use
	// spans. Allocate a bunch of blocks and make sure it stays
	// consistent with a direct count of in-use pages.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the page accounting matches the direct count.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

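// applyGCLoad starts background goroutines that allocate heavily to
// keep the GC busy, and returns a function that stops them and waits
// for them to exit.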
func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 GOMAXPROCS (that's what
	// runtime.GC() will do anyway).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously, with a sleep between calls,
	// and measure how long each call takes.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Stop the timer before stopping the load: winding the load
	// down is heavyweight and shouldn't count against the benchmark.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op doesn't mean anything because it's an average, but we
	// have a sleep in our b.N loop above which skews this significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort the latencies and report percentiles.
	slices.Sort(latencies)
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even when GOGC is off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

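// writeBarrierBenchmark runs f while a background goroutine keeps the
// GC, and therefore the write barrier, continuously active.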
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}
			// Avoid non-preemptible loops (see issue #10958).
			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

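// BenchmarkScanStackNoLocals measures GC time with several goroutines
// parked deep in recursion, giving large stacks with no live locals.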
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

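// countpwg recurses until *n reaches zero to grow the goroutine's
// stack, then signals ready and blocks until teardown.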
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}

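// TestWeakToStrongMarkTermination checks that weak-to-strong pointer
// conversions are properly coordinated with GC mark termination.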
func TestWeakToStrongMarkTermination(t *testing.T) {
	testenv.MustHaveParallelism(t)

	type T struct {
		a *int
		b int
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	w := make([]weak.Pointer[T], 2048)

	// Make sure there's no outstanding GC from a previous test.
	runtime.GC()

	// Create a bunch of objects and weak pointers to them.
	for i := range w {
		x := new(T)
		x.a = new(int)
		w[i] = weak.Make(x)
	}

	// Reset the restart flag.
	runtime.GCMarkDoneResetRestartFlag()

	// Prevent mark termination from completing.
	runtime.SetSpinInGCMarkDone(true)

	// Start a GC, and wait a little bit to get something spinning in mark termination.
	// Simultaneously, fire off another goroutine to disable spinning. If everything's
	// working correctly, then weak.Value will block, so we need to make sure something
	// prevents the GC from continuing to spin.
	done := make(chan struct{})
	go func() {
		runtime.GC()
		done <- struct{}{}
	}()
	go func() {
		// Usleep here instead of time.Sleep. time.Sleep
		// can allocate, and if we get unlucky, then it
		// can end up stuck in gcMarkDone with nothing to
		// wake it.
		runtime.Usleep(100000)

		// Let mark termination continue.
		runtime.SetSpinInGCMarkDone(false)
	}()
	time.Sleep(10 * time.Millisecond)

	// Perform many weak->strong conversions in the critical window.
	var wg sync.WaitGroup
	for _, wp := range w {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wp.Value()
		}()
	}

	// Make sure the GC completes.
	<-done

	// Make sure all the weak->strong conversions finish.
	wg.Wait()

	// The bug is triggered if there's still mark work after gcMarkDone stops the world.
	//
	// This can manifest in one of two ways today:
	// - An exceedingly rare crash in mark termination.
	// - gcMarkDone restarts, as if issue #27993 is at play.
	//
	// Check for the latter. This is a fairly controlled environment, so #27993 is very
	// unlikely to happen (it's already rare to begin with) but we'll always _appear_ to
	// trigger the same bug if weak->strong conversions aren't properly coordinated with
	// mark termination.
	if runtime.GCMarkDoneRestarted() {
		t.Errorf("gcMarkDone restarted")
	}
}

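// TestMSpanQueue exercises the runtime's mspan queue implementation:
// Push, Pop, PopN, and TakeAll, including empty-queue edge cases.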
func TestMSpanQueue(t *testing.T) {
	expectSize := func(t *testing.T, q *runtime.MSpanQueue, want int) {
		t.Helper()
		if got := q.Size(); got != want {
			t.Errorf("expected size %d, got %d", want, got)
		}
	}
	expectMSpan := func(t *testing.T, got, want *runtime.MSpan, op string) {
		t.Helper()
		if got != want {
			t.Errorf("expected mspan %p from %s, got %p", want, op, got)
		}
	}
	makeSpans := func(t *testing.T, n int) ([]*runtime.MSpan, func()) {
		t.Helper()
		spans := make([]*runtime.MSpan, 0, n)
		for range cap(spans) {
			spans = append(spans, runtime.AllocMSpan())
		}
		return spans, func() {
			for i, s := range spans {
				runtime.FreeMSpan(s)
				spans[i] = nil
			}
		}
	}
	t.Run("Empty", func(t *testing.T) {
		var q runtime.MSpanQueue
		expectSize(t, &q, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
	})
	t.Run("PushPop", func(t *testing.T) {
		s := runtime.AllocMSpan()
		defer runtime.FreeMSpan(s)

		var q runtime.MSpanQueue
		q.Push(s)
		expectSize(t, &q, 1)
		expectMSpan(t, q.Pop(), s, "pop")
		expectMSpan(t, q.Pop(), nil, "pop")
	})
	t.Run("PushPopPushPop", func(t *testing.T) {
		s0 := runtime.AllocMSpan()
		defer runtime.FreeMSpan(s0)
		s1 := runtime.AllocMSpan()
		defer runtime.FreeMSpan(s1)

		var q runtime.MSpanQueue

		// Push and pop s0.
		q.Push(s0)
		expectSize(t, &q, 1)
		expectMSpan(t, q.Pop(), s0, "pop")
		expectMSpan(t, q.Pop(), nil, "pop")

		// Push and pop s1.
		q.Push(s1)
		expectSize(t, &q, 1)
		expectMSpan(t, q.Pop(), s1, "pop")
		expectMSpan(t, q.Pop(), nil, "pop")
	})
	t.Run("PushPushPopPop", func(t *testing.T) {
		s0 := runtime.AllocMSpan()
		defer runtime.FreeMSpan(s0)
		s1 := runtime.AllocMSpan()
		defer runtime.FreeMSpan(s1)

		var q runtime.MSpanQueue
		q.Push(s0)
		expectSize(t, &q, 1)
		q.Push(s1)
		expectSize(t, &q, 2)
		expectMSpan(t, q.Pop(), s0, "pop")
		expectMSpan(t, q.Pop(), s1, "pop")
		expectMSpan(t, q.Pop(), nil, "pop")
	})
	t.Run("EmptyTakeAll", func(t *testing.T) {
		var q runtime.MSpanQueue
		var p runtime.MSpanQueue
		expectSize(t, &p, 0)
		expectSize(t, &q, 0)
		p.TakeAll(&q)
		expectSize(t, &p, 0)
		expectSize(t, &q, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
	t.Run("Push4TakeAll", func(t *testing.T) {
		spans, free := makeSpans(t, 4)
		defer free()

		var q runtime.MSpanQueue
		for i, s := range spans {
			expectSize(t, &q, i)
			q.Push(s)
			expectSize(t, &q, i+1)
		}

		var p runtime.MSpanQueue
		p.TakeAll(&q)
		expectSize(t, &p, 4)
		for i := range p.Size() {
			expectMSpan(t, p.Pop(), spans[i], "pop")
		}
		expectSize(t, &p, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
	t.Run("Push4Pop3", func(t *testing.T) {
		spans, free := makeSpans(t, 4)
		defer free()

		var q runtime.MSpanQueue
		for i, s := range spans {
			expectSize(t, &q, i)
			q.Push(s)
			expectSize(t, &q, i+1)
		}
		p := q.PopN(3)
		expectSize(t, &p, 3)
		expectSize(t, &q, 1)
		for i := range p.Size() {
			expectMSpan(t, p.Pop(), spans[i], "pop")
		}
		expectMSpan(t, q.Pop(), spans[len(spans)-1], "pop")
		expectSize(t, &p, 0)
		expectSize(t, &q, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
	t.Run("Push4Pop0", func(t *testing.T) {
		spans, free := makeSpans(t, 4)
		defer free()

		var q runtime.MSpanQueue
		for i, s := range spans {
			expectSize(t, &q, i)
			q.Push(s)
			expectSize(t, &q, i+1)
		}
		p := q.PopN(0)
		expectSize(t, &p, 0)
		expectSize(t, &q, 4)
		for i := range q.Size() {
			expectMSpan(t, q.Pop(), spans[i], "pop")
		}
		expectSize(t, &p, 0)
		expectSize(t, &q, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
	t.Run("Push4Pop4", func(t *testing.T) {
		spans, free := makeSpans(t, 4)
		defer free()

		var q runtime.MSpanQueue
		for i, s := range spans {
			expectSize(t, &q, i)
			q.Push(s)
			expectSize(t, &q, i+1)
		}
		p := q.PopN(4)
		expectSize(t, &p, 4)
		expectSize(t, &q, 0)
		for i := range p.Size() {
			expectMSpan(t, p.Pop(), spans[i], "pop")
		}
		expectSize(t, &p, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
	t.Run("Push4Pop5", func(t *testing.T) {
		spans, free := makeSpans(t, 4)
		defer free()

		var q runtime.MSpanQueue
		for i, s := range spans {
			expectSize(t, &q, i)
			q.Push(s)
			expectSize(t, &q, i+1)
		}
		p := q.PopN(5)
		expectSize(t, &p, 4)
		expectSize(t, &q, 0)
		for i := range p.Size() {
			expectMSpan(t, p.Pop(), spans[i], "pop")
		}
		expectSize(t, &p, 0)
		expectMSpan(t, q.Pop(), nil, "pop")
		expectMSpan(t, p.Pop(), nil, "pop")
	})
}

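// TestDetectFinalizerAndCleanupLeaks checks the diagnostic output of
// GODEBUG=checkfinalizers=1 for leaked cleanups and finalizers.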
func TestDetectFinalizerAndCleanupLeaks(t *testing.T) {
	got := runTestProg(t, "testprog", "DetectFinalizerAndCleanupLeaks", "GODEBUG=checkfinalizers=1")
	sp := strings.SplitN(got, "detected possible issues with cleanups and/or finalizers", 2)
	if len(sp) != 2 {
		t.Fatalf("expected the runtime to throw, got:\n%s", got)
	}
	if strings.Count(sp[0], "is reachable from") != 2 {
		t.Fatalf("expected exactly two leaked cleanups and/or finalizers, got:\n%s", got)
	}

	wantSymbolizedLocations := 2
	if !race.Enabled && !asan.Enabled {
		if strings.Count(sp[0], "is in a tiny block") != 1 {
			t.Fatalf("expected exactly one report for allocation in a tiny block, got:\n%s", got)
		}
		wantSymbolizedLocations++
	}
	if strings.Count(sp[0], "main.DetectFinalizerAndCleanupLeaks()") != wantSymbolizedLocations {
		t.Fatalf("expected %d symbolized locations, got:\n%s", wantSymbolizedLocations, got)
	}
}