src/runtime/malloc_test.go

package runtime_test

import (
	"flag"
	"fmt"
	"internal/asan"
	"internal/race"
	"internal/testenv"
	"os"
	"os/exec"
	"reflect"
	"runtime"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

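// testMemStatsCount counts TestMemStats runs so that the threshold
// checks below can be skipped on repeated runs (-test.count > 1),
// where cumulative stats may legitimately exceed them.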
var testMemStatsCount int

func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// These sanity checks aren't necessarily valid with a
			// high -test.count, since cumulative stats can grow past
			// the thresholds, so only apply them on the first run.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}

	// Fields without an nz check (HeapIdle, HeapReleased, PauseTotalNs,
	// GCCPUFraction) may legitimately be zero.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// All pauses fit in the ring buffer, so the sum must match
		// exactly and the unused slots must still be zero.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

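// TestStringConcatenationAllocs verifies that building a string from a
// constant prefix and a converted byte slice costs exactly one
// allocation for the resulting string.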
func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only the string concatenation should allocate.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

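// TestTinyAlloc checks that the tiny allocator packs several small,
// pointer-free allocations together: of 16 one-byte allocations, at
// least two must land in the same 8-byte chunk.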
func TestTinyAlloc(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

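// obj12 occupies 12 bytes on 32-bit platforms, where uint64 fields are
// only 4-byte aligned, so a tiny-allocated obj12 can end up with a
// field that is misaligned for 64-bit atomic access.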
type obj12 struct {
	a uint64
	b uint32
}

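// TestTinyAllocIssue37262 attempts to provoke an alignment fault by
// atomically accessing the first 64-bit field of a tiny-allocated
// object; see go.dev/issue/37262.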
func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}
	if asan.Enabled {
		t.Skip("tinyalloc suppressed when running in asan mode due to redzone")
	}

	// GC twice: once to reach a stable heap state, and again to
	// finish any outstanding sweeping before we start allocating.
	runtime.GC()
	runtime.GC()

	// Stay on the same M (and its tiny allocation cache) for the
	// rest of the test.
	runtime.Acquirem()

	// Make 1-byte allocations until we get a fresh tiny slot: an
	// allocation that lands on the last byte of a 16-byte block.
	aligned := false
	for i := 0; i < 16; i++ {
		x := runtime.Escape(new(byte))
		if uintptr(unsafe.Pointer(x))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Create a 4-byte object so that the fresh tiny slot is
	// partially filled.
	runtime.Escape(new(obj12))

	// Create a 12-byte object. If it is placed in the partially
	// filled tiny slot, its uint64 field will be only 4-byte
	// aligned, which is insufficient for atomic access on 32-bit
	// architectures.
	tinyObj12 := runtime.Escape(new(obj12))

	// Try to atomically access tinyObj12.a.
	atomic.StoreUint64(&tinyObj12.a, 10)

	runtime.Releasem()
}

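// TestFreegc exercises runtime.Freegc, which explicitly frees
// GC-managed memory, across a range of pointer-free allocation sizes,
// both with and without forced GCs between subtests.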
func TestFreegc(t *testing.T) {
	tests := []struct {
		size   string
		f      func(noscan bool) func(*testing.T)
		noscan bool
	}{
		// For now, only pointer-free (noscan) sizes are tested.
		{"size=16", testFreegc[[16]byte], true},
		{"size=17", testFreegc[[17]byte], true},
		{"size=64", testFreegc[[64]byte], true},
		{"size=500", testFreegc[[500]byte], true},
		{"size=512", testFreegc[[512]byte], true},
		{"size=4096", testFreegc[[4096]byte], true},
		{"size=20000", testFreegc[[20000]byte], true},
		{"size=32KiB-8", testFreegc[[1<<15 - 8]byte], true},
	}

	// First pass: force a GC before each subtest so every size starts
	// from a freshly collected heap.
	for _, tt := range tests {
		runtime.GC()
		t.Run(fmt.Sprintf("gc=yes/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()

	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		return
	}

	// Second pass: run the same subtests back to back without forced
	// GCs in between.
	for _, tt := range tests {
		t.Run(fmt.Sprintf("gc=no/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan))
	}
	runtime.GC()
}

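// testFreegc returns the subtest body for a single allocation type T:
// it allocates zeroed objects, frees them with runtime.Freegc, and
// checks allocation counts, pointer reuse, and GC assist credit.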
func testFreegc[T comparable](noscan bool) func(*testing.T) {
	// Scale the stress iterations down when runs are expected to be
	// slow: short mode, freegc disabled, or the race detector enabled.
	stressMultiple := 10
	if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled {
		stressMultiple = 1
	}

	return func(t *testing.T) {
		alloc := func() *T {
			// The allocator must return zeroed memory.
			t.Helper()
			p := Escape(new(T))
			var zero T
			if *p != zero {
				t.Fatalf("allocator returned non-zero memory: %v", *p)
			}
			return p
		}

		free := func(p *T) {
			t.Helper()
			var zero T
			if *p != zero {
				t.Fatalf("found non-zero memory before freegc (tests do not modify memory): %v", *p)
			}
			runtime.Freegc(unsafe.Pointer(p), unsafe.Sizeof(*p), noscan)
		}

		t.Run("basic-free", func(t *testing.T) {
			// Allocate and immediately free, repeatedly.
			for range 100 {
				p := alloc()
				free(p)
			}
		})

		t.Run("stack-free", func(t *testing.T) {
			// Freegc on stack addresses must be a safe no-op.
			for range 100 {
				var x [32]byte
				var y [32]*int
				runtime.Freegc(unsafe.Pointer(&x), unsafe.Sizeof(x), true)
				runtime.Freegc(unsafe.Pointer(&y), unsafe.Sizeof(y), false)
			}
		})

		// The next two subtests compare testing.AllocsPerRun counts:
		// without frees, the loop must report at least one allocation
		// per alloc() call; with a matching free after every alloc,
		// the freed memory must be reused and the count drop to zero.
		t.Run("allocs-baseline", func(t *testing.T) {
			// No frees here.
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					_ = p
				}
			})
			if allocs < 100 {
				// AllocsPerRun reports the average allocations per
				// run, so anything below 100 means allocations were
				// elided.
				t.Fatalf("expected >=100 allocations, got %v", allocs)
			}
		})

		t.Run("allocs-with-free", func(t *testing.T) {
			// Freeing after each allocation should let every
			// subsequent allocation reuse the freed memory.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			allocs := testing.AllocsPerRun(100, func() {
				for range 100 {
					p := alloc()
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		t.Run("free-multiple", func(t *testing.T) {
			// Keep up to maxOutstanding objects live before freeing
			// them all; reuse must still bring the count to zero.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping alloc tests with runtime.freegc disabled")
			}
			const maxOutstanding = 20
			s := make([]*T, 0, maxOutstanding)
			allocs := testing.AllocsPerRun(100*stressMultiple, func() {
				s = s[:0]
				for range maxOutstanding {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			})
			if allocs != 0 {
				t.Fatalf("expected 0 allocations, got %v", allocs)
			}
		})

		if runtime.GOARCH == "wasm" {
			// The remaining subtests are the heavier, stress-oriented
			// ones; they were timing out on wasm, so stop here on
			// that platform.
			t.Skip("skipping remaining freegc tests, was timing out on wasm")
		}

		t.Run("free-many", func(t *testing.T) {
			// Many outstanding objects per round, freed in order.
			s := make([]*T, 0, 1000)
			iterations := stressMultiple * stressMultiple
			for range iterations {
				s = s[:0]
				for range 1000 {
					p := alloc()
					s = append(s, p)
				}
				for _, p := range s {
					free(p)
				}
			}
		})

		t.Run("duplicate-check", func(t *testing.T) {
			// Track live pointers and fail if the allocator ever
			// returns an address that is already live, which would
			// mean freegc released memory that was still in use.
			live := make(map[uintptr]bool)
			for i := range 100 * stressMultiple {
				var s []*T

				for j := range 10 {
					p := alloc()
					uptr := uintptr(unsafe.Pointer(p))
					if live[uptr] {
						t.Fatalf("found duplicate pointer (0x%x). i: %d j: %d", uptr, i, j)
					}
					live[uptr] = true
					s = append(s, p)
				}

				for k := range s {
					p := s[k]
					s[k] = nil
					uptr := uintptr(unsafe.Pointer(p))
					free(p)
					delete(live, uptr)
				}
			}
		})

		t.Run("free-other-goroutine", func(t *testing.T) {
			// Allocate on this goroutine but free on others, so frees
			// typically happen on a different P than the matching
			// allocations.
			iterations := 10 * stressMultiple * stressMultiple
			for _, capacity := range []int{2} {
				for range iterations {
					ch := make(chan *T, capacity)
					var wg sync.WaitGroup
					for range 2 {
						wg.Add(1)
						go func() {
							defer wg.Done()
							for p := range ch {
								free(p)
							}
						}()
					}
					for range 100 {
						p := alloc()
						ch <- p
					}
					close(ch)
					wg.Wait()
				}
			}
		})

		t.Run("many-goroutines", func(t *testing.T) {
			// Concurrent alloc/free churn, each goroutine running
			// its own duplicate-pointer check.
			counts := []int{1, 2, 4, 8, 10 * stressMultiple}
			for _, goroutines := range counts {
				var wg sync.WaitGroup
				for range goroutines {
					wg.Add(1)
					go func() {
						defer wg.Done()
						live := make(map[uintptr]bool)
						for range 100 * stressMultiple {
							p := alloc()
							uptr := uintptr(unsafe.Pointer(p))
							if live[uptr] {
								panic("testFreegc: found duplicate pointer")
							}
							live[uptr] = true
							free(p)
							delete(live, uptr)
						}
					}()
				}
				wg.Wait()
			}
		})

		t.Run("assist-credit", func(t *testing.T) {
			// Freegc must leave the goroutine's GC assist credit
			// unchanged: whatever credit an allocation consumes must
			// be returned in full when that allocation is freed, even
			// while a GC runs concurrently.
			if SizeSpecializedMallocEnabled && !noscan {
				t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types")
			}
			if !RuntimeFreegcEnabled {
				t.Skip("skipping assist credit test with runtime.freegc disabled")
			}

			// Keep the GC running continuously in the background.
			done := make(chan struct{})
			defer close(done)
			go func() {
				for {
					select {
					case <-done:
						return
					default:
						runtime.GC()
					}
				}
			}()

			// Try progressively longer runs of matched alloc/free
			// pairs.
			counts := []int{1, 2, 10, 100 * stressMultiple}

			// Run with GOMAXPROCS=1, restoring the old value on exit.
			defer GOMAXPROCS(GOMAXPROCS(1))
			size := int64(unsafe.Sizeof(*new(T)))
			for _, count := range counts {
				// Start each run from a freshly collected heap.
				runtime.GC()
				for i := range count {
					// Stay on this M so the credit reads below see
					// the same state as the alloc/free pair.
					Acquirem()

					// Prime the allocator with one matched alloc/free
					// pair before measuring.
					p := alloc()
					free(p)

					// Measure assist credit around a matched
					// alloc/free pair: the net change must be zero.
					creditStart := AssistCredit()
					blackenStart := GcBlackenEnable()
					p = alloc()
					blackenAfterAlloc := GcBlackenEnable()
					creditAfterAlloc := AssistCredit()
					free(p)
					blackenEnd := GcBlackenEnable()
					creditEnd := AssistCredit()

					Releasem()
					GoschedIfBusy()

					delta := creditEnd - creditStart
					if delta != 0 {
						t.Logf("assist credit non-zero delta: %d", delta)
						t.Logf("\t| size: %d i: %d count: %d", size, i, count)
						t.Logf("\t| credit before: %d credit after: %d", creditStart, creditEnd)
						t.Logf("\t| alloc delta: %d free delta: %d",
							creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc)
						t.Logf("\t| gcBlackenEnable (start / after alloc / end): %v/%v/%v",
							blackenStart, blackenAfterAlloc, blackenEnd)
						t.FailNow()
					}
				}
			}
		})
	}
}

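// TestPageCacheLeak checks that no pages have been stranded in per-P
// page caches.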
func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

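// TestPhysicalMemoryUtilization runs the GCPhys test program, which
// exercises physical memory utilization by the heap and scavenger and
// prints OK on success.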
func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

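// TestScavengedBitsCleared checks that no scavenged bits remain set on
// memory that has come back into use; mismatches are logged with their
// address and bit patterns.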
func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t| got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

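// acLink is a 1 MiB allocation used to force the heap to grow into new
// arenas during TestArenaCollision.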
type acLink struct {
	x [1 << 20]byte
}

var arenaCollisionSink []*acLink

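// TestArenaCollision checks that heap growth recovers when the memory
// at an arena hint address is already reserved by another mapping. It
// re-executes itself in a child process that reserves the next few
// hint regions, then allocates and verifies that no allocation lands
// in a reserved region.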
func TestArenaCollision(t *testing.T) {
	// Run the actual check in a child process, since it reserves
	// address space that would interfere with other tests.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(testenv.Executable(t), "-test.run=^TestArenaCollision$", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint addresses, so
			// it will start mapping the heap wherever it can. The
			// race detector doesn't support that, so look for the
			// expected failure message rather than PASS.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}

	// Drop all but the next 3 hints, so the loop below can exhaust
	// them without mapping an excessive amount of memory.
	KeepNArenaHints(3)

	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used for
		// the heap.
		start, end, ok := MapNextArenaHint()
		if !ok {
			t.Skipf("failed to reserve memory at next arena hint [%#x, %#x)", start, end)
		}
		t.Logf("reserved [%#x, %#x)", start, end)
		disallowed = append(disallowed, [2]uintptr{start, end})

		hint, ok := NextArenaHint()
		if !ok {
			// The allocations above exhausted the hint list; there
			// is nothing left to test.
			t.Skip("ran out of arena hints")
		}

		// Allocate until the runtime moves past the hint we just
		// observed, i.e. until NextArenaHint changes.
		for {
			if next, ok := NextArenaHint(); !ok {
				t.Skip("ran out of arena hints")
			} else if next != hint {
				break
			}
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)

			// The allocation must not have landed in one of the
			// regions we reserved above.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

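// The BenchmarkMalloc* benchmarks measure the allocator at small,
// fixed sizes. Escape keeps each allocation from being elided or moved
// to the stack.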
func BenchmarkMalloc8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(int64)
		Escape(p)
	}
}

func BenchmarkMalloc16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		Escape(p)
	}
}

func BenchmarkMalloc32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new([4]int64)
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo8(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo16(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

func BenchmarkMallocTypeInfo32(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [32 / unsafe.Sizeof(uintptr(0))]*int
		})
		Escape(p)
	}
}

type LargeStruct struct {
	x [16][]byte
}

func BenchmarkMallocLargeStruct(b *testing.B) {
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		Escape(p)
	}
}

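// The BenchmarkGoroutine* benchmarks measure GC cost in the presence
// of -n goroutines parked on channel operations in different ways.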
var n = flag.Int("n", 1000, "number of goroutines")

func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

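// benchHelper starts n reader goroutines, then on each benchmark
// iteration wakes every reader and times a single GC while the
// goroutines wind back down.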
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}

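// TestMkmalloc runs the _mkmalloc tool's own tests (go -C _mkmalloc
// test) and fails if they fail.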
func TestMkmalloc(t *testing.T) {
	testenv.MustHaveGoRun(t)
	testenv.MustHaveExternalNetwork(t)
	output, err := exec.Command("go", "-C", "_mkmalloc", "test").CombinedOutput()
	t.Logf("test output:\n%s", output)
	if err != nil {
		t.Errorf("_mkmalloc tests failed: %v", err)
	}
}