Source file
src/runtime/proc_test.go
1
2
3
4
5 package runtime_test
6
7 import (
8 "fmt"
9 "internal/race"
10 "internal/testenv"
11 "math"
12 "net"
13 "runtime"
14 "runtime/debug"
15 "strings"
16 "sync"
17 "sync/atomic"
18 "syscall"
19 "testing"
20 "time"
21 )
22
// stop signals the perpetuumMobile goroutines to shut down. It is
// buffered so the single send in TestStopTheWorldDeadlock never blocks.
var stop = make(chan bool, 1)

// perpetuumMobile respawns itself until stop has a value, keeping the
// scheduler permanently supplied with fresh runnable goroutines.
func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}
32
33 func TestStopTheWorldDeadlock(t *testing.T) {
34 if runtime.GOARCH == "wasm" {
35 t.Skip("no preemption on wasm yet")
36 }
37 if testing.Short() {
38 t.Skip("skipping during short test")
39 }
40 maxprocs := runtime.GOMAXPROCS(3)
41 compl := make(chan bool, 2)
42 go func() {
43 for i := 0; i != 1000; i += 1 {
44 runtime.GC()
45 }
46 compl <- true
47 }()
48 go func() {
49 for i := 0; i != 1000; i += 1 {
50 runtime.GOMAXPROCS(3)
51 }
52 compl <- true
53 }()
54 go perpetuumMobile()
55 <-compl
56 <-compl
57 stop <- true
58 runtime.GOMAXPROCS(maxprocs)
59 }
60
// TestYieldProgress checks that a goroutine spinning on runtime.Gosched
// cannot prevent another goroutine from making progress.
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

// TestYieldLockedProgress is the same check with the spinning goroutine
// locked to its OS thread.
func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}
68
// testYieldProgress starts a goroutine (optionally locked to its OS
// thread) that spins on runtime.Gosched, then verifies that the main
// goroutine can still communicate with it over unbuffered channels.
func testYieldProgress(locked bool) {
	quit := make(chan bool) // tells the spinner to stop
	ack := make(chan bool)  // spinner confirms it stopped
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for done := false; !done; {
			select {
			case <-quit:
				done = true
			default:
				runtime.Gosched()
			}
		}
		ack <- true
	}()
	// Give the spinner time to hog the CPU, then make sure the quit
	// signal can still be delivered and acknowledged.
	time.Sleep(10 * time.Millisecond)
	quit <- true
	<-ack
}
90
// TestYieldLocked checks that Gosched and timer sleeps work from a
// goroutine that is locked to its OS thread. The goroutine deliberately
// exits while still locked (per the runtime docs, exiting a locked
// goroutine terminates its thread).
func TestYieldLocked(t *testing.T) {
	const rounds = 10
	done := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for r := 0; r < rounds; r++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		done <- true
	}()
	<-done
}
105
// TestGoroutineParallelism checks that P goroutines really run in
// parallel: each goroutine spin-waits on an atomic token for the values
// it owns, which can only all be observed if every goroutine is
// scheduled simultaneously (none of them ever blocks or yields).
func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// The spin-wait protocol below cannot make progress with one CPU.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// A forced GC during the test would deadlock: the spinning
	// goroutines contain no preemption-safe points for a classic
	// stop-the-world, so disable GC for the duration.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure
	// that's done too before starting the spinners.
	runtime.GC()
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Goroutine p consumes token values P*i+p and publishes
			// the next value, so all P goroutines must be running
			// at once for the sequence to complete.
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
146
147
// TestGoroutineParallelism2 runs the parallelism check under extra
// scheduler load and/or with the network poller active.
func TestGoroutineParallelism2(t *testing.T) {
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}
154
// testGoroutineParallelism2 is a variant of TestGoroutineParallelism
// that can first create transient scheduler load (load) and/or start a
// TCP listener (netpoll) before running the lock-step spin-wait check.
func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// The spin-wait protocol below cannot make progress with one CPU.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// A forced GC during the test would deadlock, since the spinning
	// goroutines can't be stopped/preempted; disable GC for the test.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure
	// that's done too.
	runtime.GC()
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run, so
			// that worker threads start parking just as the real
			// test below begins.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Open a listener so the network poller is active.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// Some Android devices have no records for
				// localhost; use the literal loopback address.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				// NOTE(review): defer in a loop — all listeners stay
				// open until the function returns (N is small, so the
				// fd usage is bounded).
				defer ln.Close()
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Test that all P goroutines are scheduled at the same time;
		// here each starter goroutine spawns two token-passing workers.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
230
// TestBlockLocked checks that a goroutine locked to an OS thread can
// repeatedly block on a channel send and be resumed each time.
func TestBlockLocked(t *testing.T) {
	const rounds = 10
	ch := make(chan bool)
	go func() {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		for sent := 0; sent < rounds; sent++ {
			ch <- true
		}
	}()
	for received := 0; received < rounds; received++ {
		<-ch
	}
}
245
// TestTimerFairness checks that a timer channel is eventually selected
// even while two goroutines keep another channel permanently ready.
func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	quit := make(chan bool)
	busy := make(chan bool)
	// spam keeps busy ready to receive until quit is closed.
	spam := func() {
		for {
			select {
			case busy <- true:
			case <-quit:
				return
			}
		}
	}
	go spam()
	go spam()

	deadline := time.After(20 * time.Millisecond)
	for {
		select {
		case <-busy:
		case <-deadline:
			// The timer fired despite the constant channel traffic.
			close(quit)
			return
		}
	}
}
275
// TestTimerFairness2 checks that per-goroutine timers fire even while
// two goroutines constantly exchange values on one channel and re-enter
// the scheduler through a zero-length syscall on each iteration.
func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	worker := func() {
		expired := time.After(20 * time.Millisecond)
		var buf [1]byte
		for {
			// Zero-length read: passes through the syscall path
			// without blocking or consuming input.
			syscall.Read(0, buf[0:0])
			select {
			case c <- true:
			case <-c:
			case <-expired:
				done <- true
				return
			}
		}
	}
	go worker()
	go worker()
	<-done
	<-done
}
302
303
304
// preempt does a small amount of stack-touching work. Loops call it so
// they contain a function call, giving the scheduler a point at which
// the goroutine can be preempted. Stored in a var so the call is made
// through a function value.
var preempt = func() int {
	var scratch [128]int
	total := 0
	for idx := 0; idx < len(scratch); idx++ {
		total += scratch[idx]
	}
	return total
}
313
314 func TestPreemption(t *testing.T) {
315 if runtime.GOARCH == "wasm" {
316 t.Skip("no preemption on wasm yet")
317 }
318
319
320 N := 5
321 if testing.Short() {
322 N = 2
323 }
324 c := make(chan bool)
325 var x uint32
326 for g := 0; g < 2; g++ {
327 go func(g int) {
328 for i := 0; i < N; i++ {
329 for atomic.LoadUint32(&x) != uint32(g) {
330 preempt()
331 }
332 atomic.StoreUint32(&x, uint32(1-g))
333 }
334 c <- true
335 }(g)
336 }
337 <-c
338 <-c
339 }
340
341 func TestPreemptionGC(t *testing.T) {
342 if runtime.GOARCH == "wasm" {
343 t.Skip("no preemption on wasm yet")
344 }
345
346
347 P := 5
348 N := 10
349 if testing.Short() {
350 P = 3
351 N = 2
352 }
353 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
354 var stop uint32
355 for i := 0; i < P; i++ {
356 go func() {
357 for atomic.LoadUint32(&stop) == 0 {
358 preempt()
359 }
360 }()
361 }
362 for i := 0; i < N; i++ {
363 runtime.Gosched()
364 runtime.GC()
365 }
366 atomic.StoreUint32(&stop, 1)
367 }
368
// TestAsyncPreempt runs the AsyncPreempt testprog subprocess, which
// exercises asynchronous (signal-based) goroutine preemption, and
// expects it to print OK.
func TestAsyncPreempt(t *testing.T) {
	if !runtime.PreemptMSupported {
		t.Skip("asynchronous preemption not supported on this platform")
	}
	output := runTestProg(t, "testprog", "AsyncPreempt")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}
379
// TestGCFairness runs the GCFairness testprog subprocess and expects it
// to print OK.
func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

// TestGCFairness2 runs the GCFairness2 testprog subprocess, a second GC
// fairness scenario, and expects it to print OK.
func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}
395
// TestNumGoroutine checks that runtime.NumGoroutine agrees with the
// number of goroutines that appear in a full runtime.Stack dump.
func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// The count and the dump are taken at slightly different times, so
	// the comparison is inherently racy; retry a bounded number of
	// times before declaring them out of sync.
	for i := 0; ; i++ {
		// Give goroutines that are about to exit a chance to do so,
		// so that NumGoroutine and Stack below have a chance to agree.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		// Strip "in goroutine" (from "created by ... in goroutine N"
		// lines) so those occurrences aren't counted as goroutine
		// headers below.
		output := strings.ReplaceAll(string(buf), "in goroutine", "")
		nstk := strings.Count(output, "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}
430
// TestPingPongHog checks scheduler fairness with GOMAXPROCS=1: a pair of
// CPU-hungry ping-pong goroutines and a pair of lightweight ones must
// each get a comparable share of the CPU (within a generous factor).
func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	if race.Enabled {
		// The race detector perturbs scheduling enough to make the
		// ratio check below unreliable.
		t.Skip("skipping in -race mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	// run bounces a token on wake; each wake does `limit` increments
	// of *counter, so counters measure how often each pair ran.
	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines (heavy work per wake).
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines (little work per wake).
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start both ping-pong pairs, let them compete for the single P,
	// then shut everything down.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Require the two counters to be within a factor of 20 of each
	// other. The factor is deliberately loose: a broken scheduler
	// produces a much larger imbalance, while normal jitter stays
	// well inside this bound.
	const factor = 20
	if hogCount/factor > lightCount || lightCount/factor > hogCount {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}
493
// BenchmarkPingPongHog measures ping-pong latency between two goroutines
// on a single P while a third, never-yielding hog goroutine competes for
// the CPU.
func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog that spins until told to stop, never blocking
	// voluntarily.
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times while the hog runs.
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // start the ping-pong
	<-stop       // the ponger closes stop after its b.N rounds
	b.StopTimer()
	<-ping // absorb the pinger's final send so it can exit
	<-done // wait for all three goroutines
	<-done
	<-done
}
537
// padData is never written, so it is all zeros; stackGrowthRecursive
// copies it into a local array to keep the pad from being optimized to
// nothing.
var padData [128]uint64

// stackGrowthRecursive recurses i levels deep, consuming 128 words of
// stack per level to force stack growth. The zero-check loop always
// falls through (padData is zero) but makes the pad live.
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	pad = padData
	for idx := 0; idx < len(pad); idx++ {
		if pad[idx] != 0 {
			return
		}
	}
	if i != 0 {
		stackGrowthRecursive(i - 1)
	}
}
552
553 func TestPreemptSplitBig(t *testing.T) {
554 if testing.Short() {
555 t.Skip("skipping in -short mode")
556 }
557 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
558 stop := make(chan int)
559 go big(stop)
560 for i := 0; i < 3; i++ {
561 time.Sleep(10 * time.Microsecond)
562 runtime.GC()
563 }
564 close(stop)
565 }
566
567 func big(stop chan int) int {
568 n := 0
569 for {
570
571 for i := 0; i < 1e9; i++ {
572 n++
573 }
574
575
576 bigframe(stop)
577
578
579 select {
580 case <-stop:
581 return n
582 }
583 }
584 }
585
586 func bigframe(stop chan int) int {
587
588
589
590 var x [8192]byte
591 return small(stop, &x)
592 }
593
594 func small(stop chan int, x *[8192]byte) int {
595 for i := range x {
596 x[i] = byte(i)
597 }
598 sum := 0
599 for i := range x {
600 sum += int(x[i])
601 }
602
603
604
605 nonleaf(stop)
606
607 return sum
608 }
609
// nonleaf reports whether stop has a value ready (consuming it if so),
// without blocking. The select gives the function a non-trivial body.
func nonleaf(stop chan int) bool {
	ready := false
	select {
	case <-stop:
		ready = true
	default:
	}
	return ready
}
619
// TestSchedLocalQueue exercises the scheduler's per-P run queue via a
// test entry point exported from package runtime.
func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

// TestSchedLocalQueueSteal exercises work stealing between per-P run
// queues via a runtime-exported test entry point.
func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}
627
// TestSchedLocalQueueEmpty exercises the run-queue-empty check via a
// runtime-exported test entry point, with GC disabled so the spinning
// done inside the runtime test cannot deadlock a stop-the-world.
func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// The runtime test needs real parallelism.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// A forced GC during the test could deadlock against the
	// non-preemptible spinning inside the runtime test, so disable it.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so finish a full
	// cycle first.
	runtime.GC()

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}
649
650 func benchmarkStackGrowth(b *testing.B, rec int) {
651 b.RunParallel(func(pb *testing.PB) {
652 for pb.Next() {
653 stackGrowthRecursive(rec)
654 }
655 })
656 }
657
// BenchmarkStackGrowth measures moderate (10-level) stack growth.
func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

// BenchmarkStackGrowthDeep measures deep (1024-level) stack growth.
func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}
665
// BenchmarkCreateGoroutines measures goroutine creation in one chain.
func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

// BenchmarkCreateGoroutinesParallel measures goroutine creation with one
// chain per P.
func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}
673
674 func benchmarkCreateGoroutines(b *testing.B, procs int) {
675 c := make(chan bool)
676 var f func(n int)
677 f = func(n int) {
678 if n == 0 {
679 c <- true
680 return
681 }
682 go f(n - 1)
683 }
684 for i := 0; i < procs; i++ {
685 go f(b.N / procs)
686 }
687 for i := 0; i < procs; i++ {
688 <-c
689 }
690 }
691
// BenchmarkCreateGoroutinesCapture measures goroutine creation when the
// goroutine closure captures enclosing variables (i, wg, b) rather than
// taking arguments, so each spawn allocates a closure.
func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			go func() {
				// Use the captured loop variable so the capture is
				// genuinely needed (the branch is never taken).
				if i >= N {
					b.Logf("bad")
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}
709
710
711
712 func warmupScheduler(targetThreadCount int) {
713 var wg sync.WaitGroup
714 var count int32
715 for i := 0; i < targetThreadCount; i++ {
716 wg.Add(1)
717 go func() {
718 atomic.AddInt32(&count, 1)
719 for atomic.LoadInt32(&count) < int32(targetThreadCount) {
720
721 }
722
723
724 doWork(time.Millisecond)
725 wg.Done()
726 }()
727 }
728 wg.Wait()
729 }
730
731 func doWork(dur time.Duration) {
732 start := time.Now()
733 for time.Since(start) < dur {
734 }
735 }
736
737
738
739
740
741
742
// BenchmarkCreateGoroutinesSingle measures creating b.N goroutines from
// a single producer goroutine, i.e. one P feeding work to the others.
func BenchmarkCreateGoroutinesSingle(b *testing.B) {
	// Warm up the scheduler's threads first so thread creation is not
	// part of the measurement.
	warmupScheduler(runtime.GOMAXPROCS(0))
	b.ResetTimer()

	var wg sync.WaitGroup
	wg.Add(b.N)
	for i := 0; i < b.N; i++ {
		go func() {
			wg.Done()
		}()
	}
	wg.Wait()
}
758
// BenchmarkClosureCall measures calling a closure that captures
// variables from two enclosing scopes (sum/off1 from the function, off2
// from the loop body).
func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum // keep sum live so the loop isn't eliminated
}
770
// benchmarkWakeupParallel measures goroutine wakeup latency: two
// goroutines ping-pong on unbuffered channels, spinning (via spin) for a
// controlled delay around each channel operation. The sender of each
// message always spins wakeDelay longer than its receiver, so the
// receiver is already waiting when the send happens and the benchmark
// captures how quickly it is woken. Each base delay in the list becomes
// a sub-benchmark.
func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Per iteration:
			//
			//   goroutine 1           goroutine 2
			//   spin(delay+wake)      spin(delay)     <- g2 blocks first
			//   ping <- struct{}{}    <-ping
			//   spin(delay)           spin(delay+wake)
			//   <-pong                pong <- struct{}{}
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// Sender side: spin longer so the peer is
					// already blocked on ping.
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// Receiver side for pong: spin less.
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// Receiver side for ping: spin less.
					spin(delay)
					<-ping
					// Sender side: spin longer before pong.
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}
856
// BenchmarkWakeupParallelSpinning runs the wakeup benchmark with a pure
// busy-wait delay, so the waking goroutine's thread stays on-CPU.
func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// busy-wait
		}
	})
}
865
866
867
868
869
// sysNanosleep, when non-nil, sleeps for the given duration using a
// blocking system call. It is expected to be assigned by an OS-specific
// test file; on platforms that don't provide one it stays nil and the
// benchmark below is skipped.
var sysNanosleep func(d time.Duration)

// BenchmarkWakeupParallelSyscall runs the wakeup benchmark with the
// delay spent blocked in a sleep syscall, so the waking goroutine's
// thread is off-CPU when the wakeup arrives.
func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}
880
// Matrix is a square matrix stored as a slice of rows.
type Matrix [][]float64

// BenchmarkMatmult measures a recursive, fork-join matrix multiply.
func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N), so take the cube
	// root of b.N to size the matrices.
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

// makeMatrix returns an n×n matrix with m[i][j] = i*n + j.
func makeMatrix(n int) Matrix {
	rows := make(Matrix, n)
	for r := range rows {
		row := make([]float64, n)
		for c := range row {
			row[c] = float64(r*n + c)
		}
		rows[r] = row
	}
	return rows
}

// matmult adds A[i0:i1, k0:k1] * B[k0:k1, j0:j1] into C[i0:i1, j0:j1],
// recursively splitting the largest dimension until every side is below
// threshold. Splits along i and j produce disjoint regions of C and run
// in parallel; splits along k accumulate into the same cells and run
// sequentially. A non-nil done receives one value when this call (and
// everything it spawned) has finished.
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di, dj, dk := i1-i0, j1-j0, k1-k0
	switch {
	case di >= dj && di >= dk && di >= threshold:
		// Split in i: halves write disjoint rows of C, so run the
		// first half concurrently with the second.
		mi := i0 + di/2
		sub := make(chan struct{}, 1)
		go matmult(sub, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-sub
	case dj >= dk && dj >= threshold:
		// Split in j: halves write disjoint columns of C.
		mj := j0 + dj/2
		sub := make(chan struct{}, 1)
		go matmult(sub, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-sub
	case dk >= threshold:
		// Split in k: both halves add into the same C cells, so they
		// must run one after the other.
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	default:
		// Base case: plain triple loop.
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}
944
// TestStealOrder exercises the scheduler's steal-order computation via a
// runtime-exported test entry point.
func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}
948
949 func TestLockOSThreadNesting(t *testing.T) {
950 if runtime.GOARCH == "wasm" {
951 t.Skip("no threads on wasm yet")
952 }
953
954 go func() {
955 e, i := runtime.LockOSCounts()
956 if e != 0 || i != 0 {
957 t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
958 return
959 }
960 runtime.LockOSThread()
961 runtime.LockOSThread()
962 runtime.UnlockOSThread()
963 e, i = runtime.LockOSCounts()
964 if e != 1 || i != 0 {
965 t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
966 return
967 }
968 runtime.UnlockOSThread()
969 e, i = runtime.LockOSCounts()
970 if e != 0 || i != 0 {
971 t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
972 return
973 }
974 }()
975 }
976
// TestLockOSThreadExit runs the LockOSThread subprocess tests, which
// exercise process exit while goroutines are locked to OS threads.
func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

// testLockOSThreadExit runs the LockOSThreadMain (with GOMAXPROCS=1) and
// LockOSThreadAlt subprocess cases from prog and expects OK from each.
func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}
993
// TestLockOSThreadAvoidsStatePropagation runs a subprocess test and
// expects OK; the subprocess reports "unshare not permitted" when the
// system forbids the unshare syscall it needs, in which case we skip.
func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}
1004
// TestLockOSThreadTemplateThreadRace stresses a race involving the
// runtime's template thread by running the subprocess case many times;
// each run is a fresh process, since the prebuilt binary is re-executed.
func TestLockOSThreadTemplateThreadRace(t *testing.T) {
	testenv.MustHaveGoRun(t)

	// Build once up front so the iterations only pay process startup.
	exe, err := buildTestProg(t, "testprog")
	if err != nil {
		t.Fatal(err)
	}

	iterations := 100
	if testing.Short() {
		// Reduce run time, at the cost of a much lower probability of
		// catching the race.
		iterations = 5
	}
	for i := 0; i < iterations; i++ {
		want := "OK\n"
		output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace")
		if output != want {
			t.Fatalf("run %d: want %q, got %q", i, want, output)
		}
	}
}
1027
1028 func TestLockOSThreadVgetrandom(t *testing.T) {
1029 if runtime.GOOS != "linux" {
1030 t.Skipf("vgetrandom only relevant on Linux")
1031 }
1032 output := runTestProg(t, "testprog", "LockOSThreadVgetrandom")
1033 want := "OK\n"
1034 if output != want {
1035 t.Errorf("want %q, got %q", want, output)
1036 }
1037 }
1038
1039
1040
1041
// fakeSyscall puts the goroutine into the runtime's syscall state and
// busy-waits for duration before exiting that state, imitating a
// blocking system call of that length without making a real one.
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}
1048
1049
// testPreemptionAfterSyscall checks that goroutines repeatedly entering
// and exiting (fake) syscalls of the given duration all make progress:
// with nroutines goroutines but only 2 Ps, the batch can only start and
// stop within maxDuration if post-syscall goroutines are rescheduled
// fairly.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Fewer Ps than goroutines, so they must share.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	iterations := 10
	if testing.Short() {
		iterations = 1
	}
	const (
		maxDuration = 5 * time.Second
		nroutines   = 8
	)

	for i := 0; i < iterations; i++ {
		c := make(chan bool, nroutines)
		stop := uint32(0)

		start := time.Now()
		for g := 0; g < nroutines; g++ {
			go func(stop *uint32) {
				c <- true // signal started
				for atomic.LoadUint32(stop) == 0 {
					fakeSyscall(syscallDuration)
				}
				c <- true // signal finished
			}(&stop)
		}
		// Wait until every goroutine has started.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		atomic.StoreUint32(&stop, 1)
		// Wait until every goroutine has observed stop and exited.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		duration := time.Since(start)

		if duration > maxDuration {
			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
		}
	}
}
1096
// TestPreemptionAfterSyscall runs the syscall-preemption check over a
// range of fake-syscall durations.
func TestPreemptionAfterSyscall(t *testing.T) {
	if runtime.GOOS == "plan9" {
		testenv.SkipFlaky(t, 41015)
	}

	for _, i := range []time.Duration{10, 100, 1000} {
		d := i * time.Microsecond
		t.Run(fmt.Sprint(d), func(t *testing.T) {
			testPreemptionAfterSyscall(t, d)
		})
	}
}
1109
// TestGetgThreadSwitch exercises getg behavior across thread switches
// via a runtime-exported test entry point.
func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}
1113
1114
1115
1116
1117
// TestNetpollBreak checks that NetpollBreak interrupts a blocked
// Netpoll: the goroutine below asks to poll for 10 seconds, and the test
// fails if it is not woken well before that.
func TestNetpollBreak(t *testing.T) {
	if runtime.GOMAXPROCS(0) == 1 {
		t.Skip("skipping: GOMAXPROCS=1")
	}

	// Make sure that netpoll is initialized.
	runtime.NetpollGenericInit()

	start := time.Now()
	c := make(chan bool, 2)
	go func() {
		c <- true
		runtime.Netpoll(10 * time.Second.Nanoseconds())
		c <- true
	}()
	<-c
	// Loop because a break can be consumed before the 10s poll above
	// begins; break twice per iteration in case one is absorbed by the
	// scheduler's own polling.
loop:
	for {
		runtime.Usleep(100)
		runtime.NetpollBreak()
		runtime.NetpollBreak()
		select {
		case <-c:
			break loop
		default:
		}
	}
	if dur := time.Since(start); dur > 5*time.Second {
		t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
	}
}
1152
1153
1154
// TestBigGOMAXPROCS checks that the runtime survives GOMAXPROCS=1024:
// the subprocess should still reach normal error reporting ("unknown
// function") rather than crashing. Thread creation itself is allowed to
// fail on constrained systems, in which case the test is skipped.
func TestBigGOMAXPROCS(t *testing.T) {
	t.Parallel()
	output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024")
	// Failing to create many threads is acceptable; crashing is not.
	for _, errstr := range []string{
		"failed to create new OS thread",
		"cannot allocate memory",
	} {
		if strings.Contains(output, errstr) {
			t.Skipf("failed to create 1024 threads")
		}
	}
	if !strings.Contains(output, "unknown function: NonexistentTest") {
		t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
	}
}
1171
View as plain text