Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/stringslite"
14 "runtime/internal/sys"
15 "unsafe"
16 )
17
18
19 var modinfo string
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
var (
	m0           m       // the bootstrap M, wired to the program's initial OS thread
	g0           g       // the bootstrap M's scheduling stack goroutine
	mcache0      *mcache // early mcache used before Ps exist; handed off during procresize (presumably — confirm in procresize)
	raceprocctx0 uintptr // race detector context for g0 (set from raceinit in schedinit)
	raceFiniLock mutex   // serializes race-detector finalization (racefini)
)
122
123
124
// runtime_inittasks holds the runtime package's own init tasks, run by
// main before any user package init.
var runtime_inittasks []*initTask

// main_init_done is closed by runtime.main once all package init is
// complete; cgo callbacks may block on it.
var main_init_done chan bool

// main_main is the program entry point, implemented in package main.
// (Presumably linked via go:linkname in the real source — confirm.)
func main_main()

// mainStarted reports that runtime.main has begun running, which
// allows new Ms to be started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime began
// initializing in runtime.main.
var runtimeInitTime int64

// initSigmask is the signal mask captured during schedinit, used as
// the starting signal mask for new Ms (see mcommoninit/schedinit).
var initSigmask sigset
144
145
// The main goroutine: runs runtime and user package initialization,
// invokes main.main, and exits the process.
func main() {
	mp := getg().m

	// The race context of m0's g0 is only used as the parent of the main
	// goroutine; it must not be used for anything else.
	mp.g0.racectx = 0

	// Maximum goroutine stack size: 1 GB on 64-bit, 250 MB on 32-bit.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Hard ceiling above which stack overflow checks give up entirely.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread, for the
	// duration of initialization; some libraries (notably via cgo)
	// expect init to run on the main thread.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the runtime started initializing.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	// Run the runtime package's own init tasks first.
	doInit(runtime_inittasks)

	// Defer the unlock so that a Goexit during init still unlocks.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		// Sanity-check that all the cgo support hooks were registered.
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()
		// Start the template thread before notifying C code that the
		// runtime is up, in case that C code wants to call back in.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run user package init tasks, module by module.
	for m := &firstmoduledata; m != nil; m = m.next {
		doInit(m.inittasks)
	}

	// Init is done; stop tracing it.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A -buildmode=c-archive or c-shared program has no main.main;
		// stay alive and serve calls from C.
		return
	}
	fn := main_main // indirect call so the linker can't see a direct dependency (presumably — confirm)
	fn()
	if raceenabled {
		runExitHooks(0)
		racefini()
	}

	// If the program is exiting while other goroutines are panicking,
	// give their deferred functions a chance to finish so their output
	// isn't lost (best effort, bounded).
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		// Another goroutine is panicking; park forever and let it
		// finish crashing the process.
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	runExitHooks(0)

	exit(0)
	// exit should not return; crash deliberately if it does.
	for {
		var x *int32
		*x = 0
	}
}
301
302
303
304
305 func os_beforeExit(exitCode int) {
306 runExitHooks(exitCode)
307 if exitCode == 0 && raceenabled {
308 racefini()
309 }
310 }
311
312
// start forcegc helper goroutine
func init() {
	go forcegchelper()
}
316
// forcegchelper is a dedicated goroutine that sits parked until it is
// woken (presumably by sysmon — confirm) to force a time-triggered GC.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		// Park holding forcegc.lock; the waker clears idle and readies us.
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
		// This goroutine is explicitly resumed by sysmon.
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent GC cycle.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
335
336
337
338
339
// Gosched yields the processor, allowing other goroutines to run. It
// does not suspend the current goroutine; execution resumes
// automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
344
345
346
347
348
// goschedguarded yields the processor like Gosched, but the switch is
// performed by goschedguarded_m, which can refuse the yield at a
// forbidden point (details in goschedguarded_m, not shown here).
func goschedguarded() {
	mcall(goschedguarded_m)
}
352
353
354
355
356
357
// goschedIfBusy yields the processor like Gosched, but only if there
// are no idle Ps or the caller's G has a pending preemption request —
// otherwise yielding would be pure overhead.
func goschedIfBusy() {
	gp := getg()
	// Call gosched if gp.preempt is set; we may be in a tight loop that
	// doesn't otherwise yield, and ignoring the flag would starve the
	// preemption request.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
// gopark puts the current goroutine into a waiting state with the given
// reason. It stashes lock and unlockf on the M for the scheduler side
// (park_m, not shown here) to consume, then switches to the scheduler
// via mcall. The caller must be in _Grunning (or _Gscanrunning).
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while we are parked
	}
	// Pin the M so the fields written below cannot be observed from a
	// different M mid-update.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
404
405
406
// goparkunlock parks the current goroutine and releases lock once the
// park is committed (via the parkunlock_c unlock callback).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
410
// goready marks gp runnable, switching to the system stack because
// ready manipulates scheduler state.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
416
417
// acquireSudog returns a sudog for the current goroutine, taking it
// from the per-P cache and refilling that cache from the global list
// (sched.sudogcache) or the heap when it is empty.
func acquireSudog() *sudog {
	// Pin the M so the P (and its sudogcache) can't change under us.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache: fill up
		// to half the local capacity.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail; nil the slot so the cache doesn't retain the
	// sudog after it's handed out.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
454
455
// releaseSudog returns s to the per-P cache. s must be fully cleaned
// first — all link fields, elem, and channel must be nil. When the
// local cache is full, half of it is transferred to the global list.
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	// Pin the M to avoid rescheduling to another P mid-update.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Local cache is full: build a linked chain of half the
		// entries and transfer it to the central cache in one lock
		// acquisition.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil // don't retain the pointer
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
504
505
// badmcall is called when mcall is (incorrectly) invoked while already
// running on the m->g0 stack.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}
509
// badmcall2 is called if an mcall'd function returns, which is never
// supposed to happen.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}
513
// badreflectcall reports an oversized argument frame passed to
// reflect.call.
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
517
518
519
// badmorestackg0 is called when a stack growth is attempted on g0,
// which has a fixed stack. If the platform supports a crash stack, it
// switches there to print a traceback; otherwise it just reports.
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include system goroutines and frames
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}
536
537
538
// badmorestackgsignal is called when a stack growth is attempted on the
// signal-handling goroutine, whose stack is fixed.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}
542
543
// badctxt reports an unexpectedly non-zero scheduling context.
func badctxt() {
	throw("ctxt != 0")
}
547
548
549
// gcrash is a fake g that carries the crash stack (allocated in
// schedinit); used when the real stack cannot grow (e.g. morestack on g0).
var gcrash g

// crashingG records which g currently owns the crash stack, ensuring
// only one goroutine crashes on it at a time.
var crashingG atomic.Pointer[g]
553
554
555
556
557
558
559
560
561
// switchToCrashStack switches to the dedicated crash stack and runs fn,
// which must not return (we abort afterwards). Only one goroutine may
// own the crash stack; recursive or concurrent attempts abort.
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// recursive crashing. too bad.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another goroutine is crashing; give it a moment, then give up.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
578
579
580
581
// crashStackImplemented reports whether this platform supports
// switching to a dedicated crash stack (everywhere but Windows here).
const crashStackImplemented = GOOS != "windows"

// switchToCrashStack0 is implemented in assembly.
func switchToCrashStack0(fn func())
586
587 func lockedOSThread() bool {
588 gp := getg()
589 return gp.lockedm != 0 && gp.m.lockedg != 0
590 }
591
var (
	// allglock protects allgs; allgs holds every g ever created
	// (gs are never freed, so the slice only grows).
	allglock mutex
	allgs    []*g

	// allglen and allgptr are atomic mirrors of len(allgs) and
	// &allgs[0], letting lock-free readers (atomicAllG) walk a
	// consistent prefix of the slice: allgadd publishes the pointer
	// before the length, so any (ptr, len) pair read atomically is
	// valid even if it lags behind the latest additions.
	allglen uintptr
	allgptr **g
)
617
// allgadd appends gp to allgs under allglock, keeping the atomic
// mirrors (allgptr, allglen) in sync for lock-free readers. The
// pointer is published before the length so readers never index past
// valid data.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		// The slice was reallocated; publish the new base pointer first.
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
631
632
633
634
// allGsSnapshot returns a snapshot of the current allgs slice. The
// world must be stopped or allglock held.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	// Because the world is stopped or allglock is held, allgadd
	// cannot happen concurrently with this. allgs grows
	// monotonically and existing entries never change, so we can
	// simply return a copy of the slice header. Capping the capacity
	// keeps a future append from stomping allgs' backing array.
	return allgs[:len(allgs):len(allgs)]
}
645
646
// atomicAllG returns &allgs[0] and len(allgs) as a consistent,
// lock-free pair (length is loaded first; see allgadd's publish order).
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}
652
653
// atomicAllGIndex returns ptr[i], i.e. the i'th g in a snapshot
// obtained from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
657
658
659
660
// forEachG calls fn on every G in allgs, holding allglock for the
// entire iteration; fn must not take locks that rank above allglock.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
668
669
670
671
672
673 func forEachGRace(fn func(gp *g)) {
674 ptr, length := atomicAllG()
675 for i := uintptr(0); i < length; i++ {
676 gp := atomicAllGIndex(ptr, i)
677 fn(gp)
678 }
679 return
680 }
681
const (
	// Number of goroutine ids to grab from sched.goidgen to local
	// per-P cache at once (presumably used by goroutine creation —
	// confirm at the use site, not shown here).
	_GoidCacheBatch = 16
)
687
688
689
// cpuinit sets up CPU feature detection (internal/cpu) and copies the
// feature flags the runtime itself needs into runtime variables.
// env is the value of GODEBUG, used by cpu.Initialize for feature
// overrides.
func cpuinit(env string) {
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true
	}
	cpu.Initialize(env)

	// Support cpu feature variables that are used in assembly or in
	// very hot paths by mirroring them into package runtime.
	switch GOARCH {
	case "386", "amd64":
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
		x86HasFMA = cpu.X86.HasFMA

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	}
}
712
713
714
715
// getGodebugEarly extracts the GODEBUG value from the environment
// before the normal environment-parsing machinery is available. It is
// called before anything else is initialized.
func getGodebugEarly() string {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// On these OSes the environment block follows argv
		// (argv[argc] is nil, then envp entries until another nil).
		// Count the entries...
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		// ...then scan for the GODEBUG= entry.
		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if stringslite.HasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}
	return env
}
741
742
743
744
745
746
747
748
749
// schedinit performs the bootstrap-time initialization of the
// scheduler and the rest of the runtime: lock ranking, memory
// allocator, stacks, CPU features, module metadata, signal mask,
// arguments/environment, GC, and the initial set of Ps. The ordering
// of the calls below is significant — many steps depend on earlier
// ones (e.g. mallocinit before anything that allocates).
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Enforce that this lock is always a leaf lock.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0)) // "no crash FD" sentinel

	// The world starts stopped.
	worldStopped()

	ticks.init()
	moduledataverify()
	stackinit()
	mallocinit()
	godebug := getGodebugEarly()
	cpuinit(godebug) // must run before alginit
	randinit()       // must run before alginit, mcommoninit
	alginit()        // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	parsedebugvars()
	gcinit()

	// Allocate the crash stack for gcrash. The guard values here are
	// intentionally tight (lo+1000) — the crash stack is only ever
	// used to die on, so almost the whole allocation is usable.
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	// if disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started now, as P's can run.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Same as above; see the modinfo declaration.
		modinfo = ""
	}
}
846
// dumpgstatus prints the status of gp and of the calling goroutine,
// for debugging bad status transitions.
func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
852
853
// checkmcount throws if the number of OS threads the program created
// exceeds sched.maxmcount. sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	// Exclude extra Ms: they are Ms created at C threads' first use of
	// a Go callback rather than threads the program created, so
	// counting them against the limit would let foreign callers
	// exhaust it. Subtract both the in-use count and the free-list
	// length.
	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
871
872
873
874
875
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount() // the reserved ID counts toward the thread limit
	return id
}
887
888
// mcommoninit performs the platform-independent part of initializing a
// new M. id is the M's id, or -1 to reserve a fresh one.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall and other functions iterate over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
	mProfStackInit(mp)
}
927
928
929
930
931
// mProfStackInit allocates the M's scratch buffers for collecting
// profiling stack traces (general profiling and lock profiling).
func mProfStackInit(mp *m) {
	mp.profStack = make([]uintptr, maxStack)
	mp.mLockProfile.stack = make([]uintptr, maxStack)
}
936
// becomeSpinning marks mp as spinning (actively looking for work),
// updating the global spinning count and clearing the need-spinning
// request flag.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
942
943 func (mp *m) hasCgoOnStack() bool {
944 return mp.ncgo > 0 || mp.isextra
945 }
946
const (
	// osHasLowResTimer indicates that the platform's internal timer
	// has low resolution, typically on the order of 1 ms or more.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock assigned to an int32
	// rather than a boolean.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock indicates that timestamps produced by nanotime
	// have a low resolution, typically on the order of 1 ms or more.
	osHasLowResClock = osHasLowResClockInt > 0
)
960
961
// ready marks gp (which must be in _Gwaiting) runnable and places it
// on the current P's run queue (as the next G if next is true), then
// wakes a P if needed.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}
983
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to, so that any P checking stopwait never believes
// the stop is complete.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world (see freezetheworld); checked by stopTheWorldWithSema to
// avoid a false deadlock report while crashing.
var freezing atomic.Bool
991
992
993
994
// freezetheworld is a best-effort attempt to stop all goroutines during
// a fatal crash so tracebacks are coherent. Unlike stopTheWorld it does
// not wait for acknowledgement and may be called from a signal handler.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// Debug hook: skip freezing so goroutines keep running during
		// the crash dump. A brief sleep still gives recently-preempted
		// goroutines a chance to reach a stable state for traceback.
		usleep(1000)
		return
	}

	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}
1044
1045
1046
1047
1048
// readgstatus atomically loads gp's status word. All status reads
// should go through this helper.
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
1052
1053
1054
1055
1056
// casfrom_Gscanstatus transitions gp out of a _Gscan* state to the
// corresponding non-scan state (newval must equal oldval with the
// _Gscan bit cleared). Throws if gp is not in the expected scan state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	// Leaving the scan state releases the corresponding lock rank.
	releaseLockRankAndM(lockRankGscan)
}
1082
1083
1084
// castogscanstatus tries to set the _Gscan bit on gp's status (newval
// must be oldval|_Gscan). On success the Gscan lock rank is acquired.
// This and casfrom_Gscanstatus are the only valid ways to enter/leave
// the scan states.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r

		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}
1104
1105
1106
// casgstatusAlwaysTrack forces scheduling-latency tracking on every
// transition out of _Grunning instead of a sampled subset (normally
// every gTrackingPeriod'th transition; see casgstatus).
var casgstatusAlwaysTrack = false
1108
1109
1110
1111
1112
1113
1114
// casgstatus transitions gp from oldval to newval, spinning until the
// CAS succeeds (the status may be temporarily held in a _Gscan state
// by the GC). Neither value may carry the _Gscan bit, and the values
// must differ — use castogscanstatus/casfrom_Gscanstatus for scan
// transitions. It also maintains per-G scheduling-latency tracking.
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from counting
			// against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	// A successful CAS may momentarily look like holding the Gscan rank.
	lockWithRankMayAcquire(nil, lockRankGscan)

	// yieldDelay: spin with procyield first; after ~5us fall back to
	// osyield so we don't burn CPU if the scan holder is descheduled.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			// A _Gwaiting G can never legally become _Grunnable behind
			// our back; this indicates a corrupted transition.
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if oldval == _Grunning {
		// Track every gTrackingPeriod'th time a goroutine transitions out of running.
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Handle various kinds of tracking.
	//
	// Currently:
	// - Time spent in runnable.
	// - Time spent blocked on a sync.Mutex or runtime-internal lock.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how much
		// time we spent in this state and add it to
		// runnableTime.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock. Scale the accumulated wait time by
		// gTrackingPeriod to compensate for sampling only a fraction
		// of transitions.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock; stamp the start of the wait.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what
		// time that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off
		// tracking and record how much time we spent in
		// runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
1215
1216
1217
1218
// casGToWaiting transitions gp from old to _Gwaiting, setting the wait
// reason first (the reason must be set before the status so observers
// of _Gwaiting always see a valid reason).
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// Set the wait reason before calling casgstatus, because casgstatus will use it.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}
1224
1225
1226
1227
1228
// casGToWaitingForGC transitions gp from old to _Gwaiting with a
// reason that marks it as waiting on the GC; such goroutines are
// exempt from certain checks (see isWaitingForGC reasons).
func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForGC() {
		throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
	}
	casGToWaiting(gp, old, reason)
}
1235
1236
1237
1238
1239
1240
1241
1242
// casgcopystack transitions gp (which must be in _Gwaiting or
// _Grunnable, possibly behind a transient _Gscan) to _Gcopystack for
// stack copying, returning the previous (non-scan) status. It retries
// until the CAS wins against concurrent scan bit flips.
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}
1254
1255
1256
1257
1258
// casGToPreemptScan transitions gp from _Grunning directly to
// _Gscan|_Gpreempted (the only legal values for old/new here),
// spinning until the running G reaches the CAS-able point. The Gscan
// rank is acquired before the transition.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
}
1267
1268
1269
1270
// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting (the only legal values here), reporting whether the CAS
// succeeded. The wait reason is set to preempted first so a successful
// transition always carries a valid reason.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
}
1278
1279
// stwReason is an enumeration of reasons the world is being stopped.
type stwReason uint8

// Reasons to stop-the-world.
//
// Avoid reusing reasons and add new ones instead: the ordinal indexes
// into stwReasonStrings below.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)
1304
// String returns the human-readable description of the STW reason.
func (r stwReason) String() string {
	return stwReasonStrings[r]
}
1308
1309 func (r stwReason) isGC() bool {
1310 return r == stwGCMarkTerm || r == stwGCSweepTerm
1311 }
1312
1313
1314
1315
// stwReasonStrings maps each stwReason ordinal to its display string,
// used by trace output and (r stwReason).String.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
1335
// worldStop provides context about the world stop to the caller of
// startTheWorld (timings for metrics and tracing).
type worldStop struct {
	reason           stwReason
	startedStopping  int64 // nanotime when stopping began
	finishedStopping int64 // nanotime when all Ps were stopped
	stoppingCPUTime  int64 // total P time spent stopping (summed over Ps)
}

// stopTheWorldContext carries the worldStop produced on the system
// stack out of stopTheWorld; worldsema must be held for its lifetime.
var stopTheWorldContext worldStop
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
// stopTheWorld stops all Ps and returns a context to pass to
// startTheWorld. It acquires worldsema (released by startTheWorld),
// and marks this goroutine as waiting-for-GC while the stop is in
// progress so the GC does not wait on it in turn.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// Mark the goroutine which called stopTheWorld as being
		// _Gwaiting (with a GC wait reason): if this goroutine happens
		// to hold a lock that a stopped goroutine is waiting on, and
		// the GC tries to scan/preempt this goroutine, deadlock could
		// result. Treating it as waiting-on-GC breaks that cycle.
		casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
		stopTheWorldContext = stopTheWorldWithSema(reason) // avoid write barriers — world is stopping (presumably; confirm)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
	return stopTheWorldContext
}
1392
1393
1394
1395
// startTheWorld undoes the effects of stopTheWorld and releases
// worldsema. w is the context returned by the matching stopTheWorld.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	// Hold worldsema until the very end so a concurrent
	// stopTheWorld cannot begin before our cleanup finishes.
	// Pin the M so that re-enabling preemption (clearing preemptoff)
	// and releasing the semaphore happen without being rescheduled in
	// between.
	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
1419
1420
1421
1422
// stopTheWorldGC is stopTheWorld plus gcsema, which additionally
// blocks a concurrent GC from starting while the world is stopped.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}
1427
1428
1429
1430
// startTheWorldGC undoes stopTheWorldGC, releasing gcsema after the
// world is running again.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}
// Holding worldsema grants an M the right to try to stop the world.
var worldsema uint32 = 1

// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. It is acquired in addition to
// worldsema by stopTheWorldGC so the stopped window cannot overlap a
// GC cycle.
var gcsema uint32 = 1
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
// stopTheWorldWithSema is the core of stopTheWorld. The caller must
// hold worldsema and must be on the system stack, and must not hold
// any runtime locks. It stops every P: the caller's own, Ps in
// syscalls, idle Ps, and (by preemption + waiting) running Ps.
func stopTheWorldWithSema(reason stwReason) worldStop {
	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics.
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()
	// stop current P
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--
	// try to retake all P's in Psyscall status
	trace = traceAcquire()
	for _, pp := range allp {
		s := pp.status
		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
			if trace.ok() {
				trace.ProcSteal(pp, false)
			}
			pp.syscalltick++
			pp.gcStopTime = nanotime()
			sched.stopwait--
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// stop idle P's
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Double-check that all Ps really are stopped and collect the
	// per-P stop times for the CPU-time accounting in the result.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// Some other thread is panicking; the double lock below
		// deadlocks this thread deliberately so the panicking thread
		// can produce a coherent dump without us racing it.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
1592
1593
1594
1595
1596
1597
1598
// startTheWorldWithSema restarts the world after a stop: it resizes
// the P set if GOMAXPROCS changed, wakes Ms for Ps that have runnable
// work, and records the total STW duration. now is the current time
// (0 means "call nanotime here"); the return value is the time the
// world actually restarted. Must be called on the system stack with
// the world stopped.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		// Pick up any goroutines readied by the network poller while
		// the world was stopped.
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		// A pending GOMAXPROCS change takes effect now.
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each P with work either to its parked M or to a new M.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P.  Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	wakep()

	releasem(mp)

	return now
}
1667
1668
1669
1670 func usesLibcall() bool {
1671 switch GOOS {
1672 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1673 return true
1674 case "openbsd":
1675 return GOARCH != "mips64"
1676 }
1677 return false
1678 }
1679
1680
1681
// mStackIsSystemAllocated reports whether this OS allocates g0 stacks
// itself (so the runtime must not free them). OpenBSD qualifies on
// every architecture except mips64.
func mStackIsSystemAllocated() bool {
	switch GOOS {
	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
		return true
	case "openbsd":
		return GOARCH != "mips64"
	}
	return false
}
1691
1692
1693
// mstart is the entry-point for new Ms. It is implemented in assembly
// and calls into mstart0.
func mstart()
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
// mstart0 is the Go entry-point for new Ms (called from assembly
// mstart). It sets up the g0 stack bounds if the OS provided the
// stack, runs mstart1, and tears the M down when mstart1 returns.
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		//
		// Note: these bounds may not be very accurate.
		// We set hi to &size, but there are things above
		// it. The 1024 below is designed to allow for that
		// slop.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// The OS allocated our stack; it must not be freed by mexit.
		osStack = true
	}
	mexit(osStack)
}
1742
1743
1744
1745
1746
// mstart1 finishes initializing the M (on g0): records a re-entry
// point for the scheduler, runs minit/mstartm0/mstartfn, acquires the
// M's designated P (except on m0), and enters the scheduler — it
// never returns normally; control re-enters mstart0 via the saved
// sched frame.
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Set up m.g0.sched as a label returning to just
	// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	// And goexit0 does a gogo that needs to return from mstart1
	// and let mstart0 exit the thread.
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = getcallerpc()
	gp.sched.sp = getcallersp()

	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	// m0 already has its P (bound during bootstrap); every other M
	// picks up the P that was handed to it in nextp.
	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
1783
1784
1785
1786
1787
1788
1789
// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback. See issue #6751 for details.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
1800
1801
1802
1803
// mPark blocks the calling M on its park note until another M wakes it
// (notewakeup on m.park), then clears the note for reuse.
func mPark() {
	gp := getg()
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
// mexit tears down and exits the current thread. It must be called
// from the g0 stack and never returns. osStack reports whether the
// thread's stack was allocated by the OS (and so must not be freed by
// the runtime); if false, the thread exits via exitThread so the
// stack can be reclaimed once the OS is done with it.
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates things elsewhere.
		//
		// Hand off the P and park forever; being woken again is a bug.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		// On some platforms, when calling into VDSO (e.g. nanotime)
		// we store our g on the gsignal stack, if there is one.
		// Now the stack is freed, unlink it from the m, so we
		// won't write to it when calling VDSO code.
		mp.gsignal = nil
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Events must not be traced after this point.

	// Delay reaping m until it's done with the stack.
	//
	// Put mp on the free list, though it will not be reaped while freeWait
	// is freeMWait. mp is no longer reachable via allm, so even if it is
	// on an OS stack, we must keep a reference to mp alive so that the GC
	// doesn't free mp while we are still using it.
	//
	// Note that the free list must not be linked through alllink because
	// some functions walk allm without locking, so may be using alllink.
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// Make sure pendingPreemptSignals is correct when an M exits.
		// For #41702.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy all allocated resources. After this is called, we may no
	// longer take any locks.
	mdestroy(mp)

	if osStack {
		// No more uses of mp, so it is safe to drop the reference.
		mp.freeWait.Store(freeMRef)

		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&mp.freeWait)
}
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
// forEachP calls fn(p) for every P p at a GC-safe point, switching to
// the system stack and marking this goroutine as waiting-for-GC for
// the duration (see the deadlock-avoidance note in stopTheWorld).
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg
		// Mark the user goroutine as waiting (with a GC wait reason)
		// so the GC doesn't wait for it while it is itself waiting on
		// every P — which could otherwise cycle into a deadlock if it
		// holds a lock some stopped goroutine needs.
		casGToWaitingForGC(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
// forEachPInternal runs fn(p) for every P at a GC-safe point and waits
// for all of them to complete. The caller's own P runs fn directly;
// idle Ps and Ps stuck in syscalls are handled here; running Ps are
// preempted and run fn themselves via runSafePointFn.
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		// Only one forEachP may be in flight at a time.
		throw("forEachP: sched.safePointWait != 0")
	}
	// Every P except our own must run fn.
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all other Ps to run the safe-point function when they next
	// reach a safe point.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Run fn on behalf of idle Ps directly; the CAS on runSafePointFn
	// decides a race with a P leaving the idle list.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for our own P.
	fn(pp)

	// Steal Ps that are sitting in syscalls and run fn for them.
	for _, p2 := range allp {
		s := p2.status
		// traceAcquire must precede the status CAS so the steal is
		// recorded consistently with the tracer's view.
		trace := traceAcquire()
		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
			if trace.ok() {
				trace.ProcSteal(p2, false)
				traceRelease(trace)
			}
			p2.syscalltick++
			// handoffp will run the safe-point function (and
			// decrement safePointWait) as part of re-dispatching p2.
			handoffp(p2)
		} else if trace.ok() {
			traceRelease(trace)
		}
	}

	// Wait for the remaining Ps to run fn, re-preempting periodically
	// in case a preemption request was missed.
	if wait {
		for {
			// Sleep up to 100us before retrying the preemption.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
// runSafePointFn runs the safe-point function set by forEachP, if this
// P's runSafePointFn flag is set, and wakes the forEachP waiter once
// the last P has completed.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race with forEachP, which may run the function on
	// our behalf (e.g. if this P was idle): whoever wins the CAS runs
	// fn exactly once.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
2080
2081
2082
2083
// cgoThreadStart is the pointer to the C function used to start new
// threads when running with cgo (set during cgo initialization).
var cgoThreadStart unsafe.Pointer

// cgothreadstart is the argument record passed to _cgo_thread_start:
// the g to run, the TLS slot to initialize, and the entry function.
type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
// allocm allocates a new m unassociated with any thread. If needed, it
// temporarily borrows pp to allocate (acquiring it only if the caller
// has no P). fn is recorded as mp.mstartfn for the new thread to run,
// and id is the m ID to use (-1 to allocate one).
func allocm(pp *p, fn func(), id int64) *m {
	// Hold allocmLock (read side) so an exec-style operation that
	// needs to exclude new m creation can take the write side.
	allocmLock.rlock()

	// Disable preemption while we may be running with a borrowed P.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		// Borrow pp so we can allocate below.
		acquirep(pp)
	}

	// Opportunistically free stacks of exited ms parked on
	// sched.freem; entries still in freeMWait state stay on the list.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				// Not done exiting yet; keep it for a later pass.
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// The thread is gone; tell the tracer before the m is
			// dropped.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			if wait == freeMStack {
				// The exited thread's g0 stack is ours to free.
				// stackfree must run on the system stack.
				systemstack(func() {
					stackfree(freem.g0.stack)
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// With cgo or on platforms where thread stacks are OS-allocated,
	// g0 gets no Go-allocated stack (size -1); otherwise allocate one.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if pp == gp.m.p.ptr() {
		// We borrowed pp above; give it back.
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
// needm obtains an extra m (one not associated with a Go-created
// thread) for the current non-Go thread so it can run Go code, e.g.
// for a cgo or signal-handler callback. signal reports whether the
// caller is a signal handler (which suppresses tracing here).
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// No extra m has been set up; a callback arrived on a thread
		// the runtime has never seen and cannot service. There is no
		// usable m, so avoid throw (which needs an m) and write the
		// error directly.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save the caller's signal mask and block all signals while the m
	// is only partially initialized: until minit completes, signal
	// handling on this thread would misbehave.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// Take an extra m off the global list.
	mp, last := getExtraM()

	// If we took the last one, remember to create a replacement
	// (the callback path will call newextram).
	mp.needextram = last

	// Record the original mask so dropm can restore it.
	mp.sigmask = sigmask

	// Point this thread's TLS at the m's g0 (platform-specific).
	osSetupTLS(mp)

	// Install g0 and size its stack bounds from the current C stack
	// pointer.
	setg(mp.g0)
	sp := getcallersp()
	callbackUpdateSystemStack(mp, sp, signal)

	// The thread is now running Go code, not plain C.
	mp.isExtraInC = false

	// Complete per-thread initialization.
	asminit()
	minit()

	// Emit a trace event for the goroutine entering syscall state,
	// except on the signal path where tracing is not safe here.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// The extra m's goroutine parks in _Gdead; bring it up as if it
	// were returning from a syscall into Go.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	sched.ngsys.Add(-1)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
2295
2296
2297
2298
// needAndBindM obtains an extra m and, if the C pthread key has been
// created, binds it to the current C thread so later callbacks on this
// thread reuse the same m (and dropm runs at thread exit).
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}
2306
2307
2308
2309
// newextram allocates m structures for future cgo/signal callbacks:
// one per thread currently waiting in lockextra (extraMWaiters), or a
// single one if the extra list is empty.
func newextram() {
	c := extraMWaiters.Swap(0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else if extraMLength.Load() == 0 {
		// Make sure there is at least one extra M available.
		oneNewExtraM()
	}
}
2321
2322
// oneNewExtraM allocates one m and goroutine and puts them on the
// extra-M list for use by needm.
func oneNewExtraM() {
	// Create an m and a goroutine whose saved state makes it appear
	// to be in the middle of a call to goexit, so that when a cgo
	// callback returns the goroutine exits cleanly.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	// Leave room for the frame layout expected at the top of the
	// stack (extra space below sp).
	gp.sched.sp -= 4 * goarch.PtrSize
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// The goroutine parks in _Gdead until needm revives it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// Until a callback arrives, this m belongs to a pure-C thread.
	mp.isExtraInC = true
	// The goroutine is permanently locked to this m.
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	// Make the goroutine visible to the GC and debuggers.
	allgadd(gp)

	// Count it as a system goroutine while it sits dead on the extra
	// list (needm decrements when it is put to use).
	sched.ngsys.Add(1)

	// Publish on the extra-M list.
	addExtraM(mp)
}
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
// dropm is the inverse of needm: it puts the current extra m back on
// the extra-M list when a cgo/signal callback is done, detaching it
// from the current thread.
func dropm() {
	mp := getg().m

	// Emit trace events for the goroutine going back to dead, except
	// when the m was taken from a signal handler (tracing unsafe).
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Return the goroutine to the dead state it parks in on the
	// extra list.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// NOTE(review): decrementing syscalltick appears to compensate
	// for the increment the syscall-exit path performed, so the next
	// needm sees a consistent count — confirm against upstream.
	mp.syscalltick--

	// Reset goroutine trace state so stale per-g trace data does not
	// leak into the next use of this extra m.
	mp.curg.trace.reset()

	// If the tracer is running, tell it this thread is going away so
	// its trace buffers are flushed.
	if traceEnabled() || traceShuttingDown() {
		// sched.lock is required by traceThreadDestroy here.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit tears down per-thread signal
	// handling, then restore the caller's mask at the very end.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0's stack bounds: the C stack it described belongs to
	// this thread and will not be valid for the m's next user.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0

	putExtraM(mp)

	msigrestore(sigmask)
}
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
// cgoBindM stores the current g0 in the C thread-specific value slot
// (via _cgo_bindm) so the m stays bound to this C thread and dropm is
// deferred to thread destruction. Must run on g0.
func cgoBindM() {
	if GOOS == "windows" || GOOS == "plan9" {
		fatal("bindm in unexpected GOOS")
	}
	g := getg()
	if g.m.g0 != g {
		fatal("the current g is not g0")
	}
	if _cgo_bindm != nil {
		asmcgocall(_cgo_bindm, unsafe.Pointer(g))
	}
}
2520
2521
// getm returns the current m as an opaque uintptr (used by cgo glue,
// which cannot hold a *m).
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}
2525
var (
	// extraM is the head of the lock-free list of extra m's, encoded
	// as a uintptr; the value 1 means the list is locked (see
	// lockextra/unlockextra).
	extraM atomic.Uintptr
	// extraMLength is the number of m's on the extra list.
	extraMLength atomic.Uint32
	// extraMWaiters counts threads spinning in lockextra waiting for
	// an extra m to appear; newextram uses it to size its batch.
	extraMWaiters atomic.Uint32
	// extraMInUse counts extra m's currently taken off the list and
	// executing callbacks.
	extraMInUse atomic.Uint32
)
2542
2543
2544
2545
2546
2547
2548
2549
// lockextra locks the extra-M list and returns its head. If nilokay is
// false, it spins (registering itself in extraMWaiters once) until the
// list is non-empty. The list stays locked until unlockextra.
func lockextra(nilokay bool) *m {
	// The sentinel value stored in extraM while the list is locked.
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Someone else holds the list lock; yield and retry.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Announce (once) that we are waiting for an extra m,
				// so newextram will create one for us.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
2578
2579
// unlockextra installs mp as the new head of the extra-M list (thereby
// unlocking it) and adjusts the list-length counter by delta.
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
2584
2585
2586
2587
2588
2589
2590
2591
// getExtraM pops an m off the extra list (blocking until one exists)
// and reports whether it was the last one.
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	// Unlock by publishing the popped m's successor as the new head.
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}
2598
2599
2600
2601
2602
// putExtraM returns an extra m (previously obtained via getExtraM)
// to the extra list.
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
2607
2608
2609
2610
// addExtraM pushes mp onto the head of the extra-M list.
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
2616
var (
	// allocmLock is held for read when creating new m's (allocm) and
	// for write by operations that must exclude new-m creation.
	allocmLock rwmutex

	// execLock serializes exec and thread creation: new threads are
	// created with the read lock held so exec (write lock) can
	// exclude them, avoiding threads with inconsistent exec state.
	execLock rwmutex
)
2628
2629
2630
// Messages written (without allocation) when OS thread or stack
// creation fails; kept as constants so the failure path cannot itself
// allocate.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
2635
2636
2637
2638
// newmHandoff hands off new-m creation from threads that cannot safely
// create OS threads themselves (locked or in-cgo threads) to the
// template thread.
var newmHandoff struct {
	lock mutex

	// newm is the singly-linked (via schedlink) list of m's awaiting
	// thread creation by the template thread.
	newm muintptr

	// waiting is true when the template thread is parked on wake;
	// set wake to notify it of new work.
	waiting bool
	wake    note

	// haveTemplateThread is nonzero once the template thread has been
	// started (set via atomic CAS in startTemplateThread).
	haveTemplateThread uint32
}
2656
2657
2658
2659
2660
2661
2662
2663
// newm creates a new m that will start an OS thread and run fn (or the
// scheduler if fn is nil). pp, if non-nil, is handed to the new thread
// as its initial P; id is the m ID to use (-1 to allocate one).
func newm(fn func(), pp *p, id int64) {
	// Disable preemption: once allocm publishes the new m (e.g. via
	// allm), its fields must already be initialized, and the creating
	// thread must not be migrated in between.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We are on a thread that is locked to user state or running
		// C code: its OS-level state (signal mask, namespaces, ...)
		// may be nonstandard and must not be inherited by a new
		// thread. Hand creation to the template thread, which has a
		// known-clean state. (Plan 9 is exempt.)
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The m will be available once the template thread runs it;
		// that may take a while, but nothing here depends on it.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
2712
// newm1 creates the OS thread for mp, going through the C runtime
// (_cgo_thread_start) when cgo is in use, or newosproc otherwise.
// The exec read lock is held across thread creation so exec cannot
// race with it.
func newm1(mp *m) {
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		// The C side will read ts; mark it initialized for the
		// sanitizers.
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock()
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock()
	newosproc(mp)
	execLock.runlock()
}
2737
2738
2739
2740
2741
// startTemplateThread starts the template thread if it has not already
// been started. The CAS on haveTemplateThread makes this idempotent;
// wasm has no threads at all, so it is a no-op there.
func startTemplateThread() {
	if GOARCH == "wasm" {
		return
	}

	// Disable preemption so this thread cannot be stopped between
	// winning the CAS and actually creating the thread (which would
	// leave haveTemplateThread set with no thread).
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
// templateThread runs forever on a thread with known-clean OS state,
// creating OS threads on behalf of threads that cannot do so safely
// themselves (see newm). It never returns.
func templateThread() {
	// Register as a system thread for deadlock accounting.
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			// Detach the whole pending list, then create threads
			// with the lock released so newm callers are not blocked.
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		// Park until newm queues more work.
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
2796
2797
2798
// stopm puts the current m on the idle-m list and parks it until it is
// handed a new P (in m.nextp) and woken, at which point it acquires
// that P and returns.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	// Whoever woke us (startm) stored our next P in m.nextp.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
2819
// mspinning is used as the mstartfn of an m created by startm with
// spinning=true: the new thread marks itself spinning on startup
// (startm already incremented nmspinning on its behalf).
func mspinning() {
	getg().m.spinning = true
}
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
// startm schedules some m to run P pp (creating an m if necessary).
// If pp is nil, it tries to get an idle P (and does nothing if there
// is none). spinning indicates the caller has incremented nmspinning
// and the started m should run in spinning mode. lockheld indicates
// the caller already holds sched.lock.
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption: between releasing an idle P here and the
	// target m taking it, our thread must not stop at a GC safepoint,
	// or stopTheWorld could miss the in-flight P.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// A spinning start must come with a specific P; taking
			// one from the idle list here would break the spinning
			// accounting.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No idle m: create one. Reserve the m ID while holding
		// sched.lock, but call newm without it (newm may allocate
		// and must not hold sched.lock).
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The new thread starts out spinning.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership of pp transferred to the new m; preemption is
		// safe again.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		// A spinning m is for finding work, not for a P that already
		// has some.
		throw("startm: p has runnable gs")
	}
	// Hand pp to the parked m and wake it.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	releasem(mp)
}
2933
2934
2935
2936
2937
// handoffp hands off P ownership from an m about to block or exit.
// It either starts an m to run pp, or stashes pp on the idle list,
// choosing based on whether there is work for it to do.
func handoffp(pp *p) {
	// If pp has local or global work, start an m to run it.
	if !runqempty(pp) || sched.runqsize != 0 {
		startm(pp, false, false)
		return
	}
	// The trace reader counts as work.
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// GC mark work counts as work.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
		startm(pp, false, false)
		return
	}
	// No local work: if nothing is spinning or idle, start a spinning
	// m to guard against races with new work being submitted.
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// Stop-the-world in progress: park the P in gcstop state and
		// notify the stopper if we are the last one.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run a pending forEachP safe-point function on pp's behalf.
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling the
	// network, keep pp alive to serve netpoll.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// Capture pp's earliest timer before it goes idle, so we can make
	// sure something wakes up in time for it.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
3005
3006
3007
3008
// wakep tries to add one more spinning m to look for work, if there is
// an idle P. It does nothing if there is already a spinning m (that m
// is responsible for further wakeups).
func wakep() {
	// Be conservative: only one spinning m at a time via this path.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until pp's ownership is transferred to
	// startm, so a stop-the-world cannot observe a P in limbo.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// Nothing to run on; undo the spinning count.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
3044
3045
3046
// stoplockedm stops execution of the current m, which is locked to a
// goroutine, until that goroutine becomes runnable again; it then
// acquires the P that was handed to it and returns.
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Give the P to someone else; we cannot run arbitrary work.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Park until startlockedm wakes us with our goroutine runnable.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3070
3071
3072
3073
3074
// startlockedm hands the current P to the m that gp is locked to and
// wakes that m so it can run gp; the current m then stops (stopm).
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// The locked m is leaving the idle-locked pool.
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}
3090
3091
3092
// gcstopm stops the current m for a stop-the-world: it parks the P in
// gcstop state, notifies the stopper if it was the last P, and then
// stops the m itself.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		// Leave spinning state; the world is stopping, so there is no
		// work to find.
		gp.m.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
// execute schedules gp to run on the current m/P and jumps into it via
// gogo; it never returns. inheritTime is true if gp should run in the
// current time slice rather than starting a new one.
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure gp's stack is recorded in the in-flight goroutine
		// profile before it starts running.
		tryRecordGoroutineProfile(gp, osyield)
	}

	// Bind gp and this m together before touching gp's state.
	mp.curg = gp
	gp.m = mp
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		// Starting a new time slice.
		mp.p.ptr().schedtick++
	}

	// Sync this thread's CPU-profiling rate with the global setting.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
3164
3165
3166
3167
3168
// findRunnable finds a runnable goroutine to execute, blocking (via
// stopm) until one is available. It tries, in order: trace reader, GC
// worker, occasional global-queue fairness check, local runq, global
// runq, netpoll, work stealing, idle GC work, and finally parks the m.
// tryWakeP indicates the returned goroutine is special (trace reader
// or GC worker) and the caller should wake another P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

top:
	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		// Stop-the-world pending: stop and retry once restarted.
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// Run expired timers now; this may make goroutines runnable and
	// yields the next wake time used for netpoll below.
	now, pollUntil, _ := pp.timers.check(0)

	// The trace reader is scheduled with highest priority.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC mark worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Occasionally poll the global queue for fairness, so goroutines
	// there are not starved by a busy local queue.
	if pp.schedtick%61 == 0 && sched.runqsize > 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 1)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake the finalizer goroutine if it is waiting and has work.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}
	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// Local run queue.
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// Global run queue.
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Non-blocking network poll: cheap opportunistic check before the
	// more expensive work stealing. lastpoll == 0 means some thread
	// is already blocked in netpoll.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spin to steal work from other Ps, but cap the number of
	// spinning ms at half the busy Ps to limit wasted CPU when
	// parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			return gp, inheritTime, false
		}
		if newWork {
			// Stealing may have created new work (e.g. ran a timer);
			// restart the search.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer discovered while stealing.
			pollUntil = w
		}
	}

	// No ordinary work: try idle-priority GC mark work.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// Platform hook run before the m goes idle (e.g. wasm event
	// handling); it may produce a runnable goroutine.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Snapshot allp and the masks before dropping the P: with the P
	// released, a consistent view is needed for the no-P rechecks
	// below.
	allpSnapshot := allp
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// Recheck for work under sched.lock, then return the P.
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// Someone requested a spinning m; volunteer.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findrunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Transition out of spinning carefully: work submitted between
	// the last checks and decrementing nmspinning could otherwise be
	// missed with no spinning m to pick it up, so recheck all work
	// sources once more after the decrement.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findrunnable: negative nmspinning")
		}

		// Recheck the global queue.
		lock(&sched.lock)
		if sched.runqsize != 0 {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp := globrunqget(pp, 0)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				unlock(&sched.lock)
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		// Recheck all local run queues using the snapshot.
		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Recheck for idle-priority GC work.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Recompute the earliest timer across all Ps so netpoll below
		// wakes in time.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Block in netpoll until the next timer or I/O readiness; the
	// Swap(0) on lastpoll claims the role of poller.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// Under the fake clock, only poll without blocking.
			delay = 0
		}
		list, delta := netpoll(delay)
		// Refresh now: netpoll may have blocked for a while.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Fake time and nothing ready: pretend to block like a
			// real poller would.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			// No P available: inject the ready goroutines for others.
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Someone else is polling; kick them if our deadline is
		// earlier than theirs.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3545
3546
3547
3548
3549
// pollWork reports whether there is non-background work this P could
// be doing: global or local run-queue entries, or ready network I/O.
// It may inject goroutines found by netpoll as a side effect.
func pollWork() bool {
	if sched.runqsize != 0 {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
3567
3568
3569
3570
3571
3572
3573
// stealWork attempts to steal a runnable goroutine or expired timer
// from any P. It returns either a goroutine to run, or (via newWork)
// an indication that further checks may find work, plus the updated
// now and the earliest known timer wake time (pollUntil).
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		// On the final pass, also steal timers and runnext — the most
		// invasive (and contended) forms of stealing.
		stealTimersOrRunNextG := i == stealTries-1

		// Visit Ps in a random order to spread contention.
		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC is waiting; report newWork so the caller restarts
				// and observes it.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Check p2's timers even if it is idle: timer mask readers
			// see Ps that may have expired timers nobody is running.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// Running p2's timers may have readied goroutines
					// into our own local queue.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother stealing runnables from an idle P.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// No work found; ranTimer means timers ran, so work may appear.
	return nil, false, now, pollUntil, ranTimer
}
3642
3643
3644
3645
3646
3647
// checkRunqsNoP rechecks all Ps' run queues using snapshots taken
// earlier, without holding a P. If any non-idle P has runnable
// goroutines, it tries to grab an idle P (returned for the caller to
// acquire); it returns nil if there is no work or no idle P.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// No idle P to run the work on.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			return pp
		}
	}
	return nil
}
3666
3667
3668
3669
// checkTimersNoP computes the earliest timer wake time across all Ps
// (per the snapshot mask), folding it into pollUntil. It runs without
// a P, so it only reads wake times and never runs timers.
func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
	for id, p2 := range allpSnapshot {
		if timerpMaskSnapshot.read(uint32(id)) {
			w := p2.timers.wakeTime()
			if w != 0 && (pollUntil == 0 || w < pollUntil) {
				pollUntil = w
			}
		}
	}

	return pollUntil
}
3682
3683
3684
3685
3686
// checkIdleGCNoP checks for idle-priority GC mark work while the
// caller holds no P. On success it returns an idle P (for the caller
// to acquire) and the mark-worker goroutine to run; otherwise both
// results are nil.
func checkIdleGCNoP() (*p, *g) {
	// Quick unsynchronized checks first; confirmed under the lock
	// below.
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcMarkWorkAvailable(nil) {
		return nil, nil
	}

	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Re-check blackening and claim an idle-worker slot under the
	// lock; undo the P grab on failure.
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// No worker goroutine available; undo everything.
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
3744
3745
3746
3747
// wakeNetPoller ensures some thread will be awake by time `when`
// (a timer deadline): it breaks the blocked netpoller if its deadline
// is later, or wakes a P to run the poll loop if no one is polling.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() == 0 {
		// A thread is blocked in netpoll; interrupt it only if it
		// would otherwise sleep past `when`.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > when {
			netpollBreak()
		}
	} else {
		// Nobody is polling; wake a P so findRunnable will poll.
		// (plan9 has no netpoller to wake for.)
		if GOOS != "plan9" {
			wakep()
		}
	}
}
3766
// resetspinning takes the current m out of spinning state and, since
// it found work, wakes another P to preserve the invariant that work
// submission is always noticed by some spinning m.
func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		throw("findrunnable: negative nmspinning")
	}
	// We found work, so other pending work may exist too; make sure
	// another m goes looking.
	wakep()
}
3782
3783
3784
3785
3786
3787
3788
3789
3790
// injectglist makes every goroutine on glist runnable and distributes
// them: with no current P they all go to the global queue; otherwise
// up to npidle of them go global (with idle Ps woken to run them) and
// the rest go to the current P's local queue. Clears glist.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}
	trace := traceAcquire()
	if trace.ok() {
		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
			trace.GoUnpark(gp, 0)
		}
		traceRelease(trace)
	}

	// Mark all runnable in one pass, counting them and remembering
	// the tail so the list can be requeued as a batch.
	head := glist.head.ptr()
	var tail *g
	qsize := 0
	for gp := head; gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		qsize++
		casgstatus(gp, _Gwaiting, _Grunnable)
	}

	// Rebuild as a gQueue and empty the input list.
	var q gQueue
	q.head.set(head)
	q.tail.set(tail)
	*glist = gList{}

	// startIdle starts up to n Ms/Ps to run the queued goroutines.
	// Called with sched.lock expectations handled via startm's
	// lockheld=true path.
	startIdle := func(n int) {
		for i := 0; i < n; i++ {
			mp := acquirem()
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No P: everything goes to the global queue.
		lock(&sched.lock)
		globrunqputbatch(&q, int32(qsize))
		unlock(&sched.lock)
		startIdle(qsize)
		return
	}

	// Move up to npidle goroutines to the global queue so idle Ps can
	// pick them up; keep the remainder local.
	npidle := int(sched.npidle.Load())
	var (
		globq gQueue
		n     int
	)
	for n = 0; n < npidle && !q.empty(); n++ {
		g := q.pop()
		globq.pushBack(g)
	}
	if n > 0 {
		lock(&sched.lock)
		globrunqputbatch(&globq, int32(n))
		unlock(&sched.lock)
		startIdle(n)
		qsize -= n
	}

	if !q.empty() {
		runqputbatch(pp, &q, qsize)
	}

	// Wake one more P in case the local queue now has work that this
	// P alone cannot drain promptly.
	wakep()
}
3883
3884
3885
// schedule performs one round of scheduling: it finds a runnable
// goroutine and executes it on the current m. It never returns.
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	if mp.lockedg != 0 {
		// This m is dedicated to one goroutine: wait for it and run
		// it; execute never returns.
		stoplockedm()
		execute(mp.lockedg.ptr(), false)
	}

	// A thread running cgo has its g0 stack in use by C; it cannot
	// schedule Go code.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// A spinning m's local queue must be empty by definition; check
	// the invariant before the (blocking) findRunnable.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable()

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// The world is being frozen for a crash dump but threads are
		// allowed to keep running; deadlock this thread deliberately
		// so it neither runs new work nor fights the freeze.
		// (lock on an already-held mutex never returns.)
		lock(&deadlock)
		lock(&deadlock)
	}

	// We found work; leave spinning state (and wake a friend).
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// User-goroutine scheduling is disabled: park gp on the
		// disabled list unless it became schedulable again, rechecked
		// under the lock.
		lock(&sched.lock)
		if schedEnabled(gp) {
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			sched.disable.n++
			unlock(&sched.lock)
			goto top
		}
	}

	// Special goroutines (GC worker, trace reader) don't count toward
	// ordinary work, so wake a P to keep looking.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// Hand gp to the m it is locked to and pick something else.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}
3967
3968
3969
3970
3971
3972
3973
3974
// dropg removes the association between the current m and its curg,
// clearing both links. Write barriers are skipped (NoWB setters)
// because this runs in contexts where they are not allowed.
func dropg() {
	gp := getg()

	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
3981
// parkunlock_c is a gopark unlock function that simply unlocks the
// given runtime mutex and commits the park (returns true).
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
3986
3987
// park_m is the gopark continuation, running on g0: it transitions gp
// to _Gwaiting, drops it from this M, invokes the optional unlock
// callback, and schedules other work. If the callback vetoes the park,
// gp is made runnable again and resumed immediately.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	if trace.ok() {
		// Trace the park event before the status transition: taking a
		// stack trace requires still owning gp's stack, which we give
		// up at the transition.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}
	// The wait reason was already set by park_m's caller.
	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The callback refused the park: make gp runnable again
			// and run it right away.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back; never returns.
		}
	}
	schedule()
}
4024
// goschedImpl yields the processor for gp: it moves gp from _Grunning
// to _Grunnable, places it on the global run queue, and schedules other
// work on this M. preempted selects which trace event is emitted.
func goschedImpl(gp *g, preempted bool) {
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Trace the event before the transition; we need to still own
		// gp's stack to take a stack trace.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	if mainStarted {
		wakep()
	}

	schedule()
}
4058
4059
// gosched_m is the Gosched continuation, running on g0.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
4063
4064
// goschedguarded_m is like gosched_m, but aborts the yield (resuming gp
// directly) when the M is in a state where preemption is not allowed.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
4071
// gopreempt_m yields gp in response to a (synchronous) preemption
// request, emitting a preempt trace event.
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
4075
4076
4077
4078
// preemptPark parks gp and puts it in _Gpreempted (asynchronous
// preemption at a safe point). gp is later resumed by whoever requested
// the preemption.
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not happen in
		// SPWRITE-marked functions: unwinding from such a PC is unsafe.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition from _Grunning to _Gscan|_Gpreempted. We can't be in
	// _Grunning when we dropg because then we'd be running without an M,
	// but the moment we're in _Gpreempted, something could claim this G
	// before we've fully cleaned it up. Hence, we set the scan bit to
	// lock down further transitions until we can dropg.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
	dropg()

	// Ordering is subtle here: the park event must be emitted while we
	// still hold the scan bit, before the final status transition makes
	// gp claimable by others; the trace release happens only after the
	// transition so the tracer observes a consistent state.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}
	schedule()
}
4134
4135
4136
4137
// goyield is like Gosched, but it emits a GoPreempt trace event instead
// of GoSched, and puts the current G on the current P's local run queue
// instead of the global queue.
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
4142
// goyield_m is the goyield continuation, running on g0: it requeues gp
// on the local run queue and schedules.
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Trace the event before the transition; we still own the
		// stack here, which is required for taking a stack trace.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false)
	schedule()
}
4160
4161
// goexit1 finishes execution of the current goroutine: it records the
// end for race detection and tracing, then switches to g0 to destroy
// the goroutine.
func goexit1() {
	if raceenabled {
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
4173
4174
// goexit0 is the goexit continuation on g0.
func goexit0(gp *g) {
	gdestroy(gp)
	schedule()
}
4179
// gdestroy tears down an exiting goroutine: moves it to _Gdead, clears
// its per-goroutine state, flushes its GC assist credit, and returns it
// to the free list. If the goroutine was locked to the OS thread, the
// thread itself is retired (except on plan9 and wasm).
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case
	gp._panic = nil // non-nil for Goexit during panic; points at stack-allocated data
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool so it isn't lost and
		// can help the GC pacer.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" {
		// No threads on wasm: nothing thread-related to clean up.
		gfput(pp, gp)
		return
	}

	if locked && mp.lockedInt != 0 {
		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
		throw("exited a goroutine internally locked to the OS thread")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine may have locked this thread because it put the
		// thread in an unusual kernel state. Kill the thread rather
		// than returning it to the pool.
		//
		// Return to mstart, which will release the P and exit the
		// thread.
		if GOOS != "plan9" {
			gogo(&mp.g0.sched)
		} else {
			// On plan9 the thread may be reused; just clear the
			// external lock count instead.
			mp.lockedExt = 0
		}
	}
}
4241
4242
4243
4244
4245
4246
4247
4248
4249
// save updates getg().sched to refer to pc, sp, and bp so that a
// following gogo will restore that context. It must not be called on
// system g's (g0 or gsignal), whose sched fields have special meaning.
// It avoids write barriers, since a write barrier could itself clobber
// getg().sched.
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special: it describes the context for exiting
		// the thread and is written directly by mstart1.
		// m.gsignal.sched should not be used at all.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.ret = 0
	gp.sched.bp = bp
	// ctxt must be zero, but we can't write it here without a write
	// barrier. It should always already be zero; assert that.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
// reentersyscall is the common implementation of syscall entry: it
// records the caller's context so traceback works during the syscall,
// transitions the G to _Gsyscall, and detaches the P into _Psyscall so
// sysmon can retake it if the syscall blocks. pc/sp/bp are the caller's
// registers at the syscall boundary.
//
// Note the repeated save(pc, sp, bp) calls: any systemstack call below
// may clobber gp.sched, and gp.sched must describe the syscall frame
// for traceback, so the context is re-saved after each one.
func reentersyscall(pc, sp, bp uintptr) {
	trace := traceAcquire()
	gp := getg()

	// Disable preemption because during this function g is in _Gsyscall
	// status, but can have inconsistent g->sched; don't let GC observe it.
	gp.m.locks++

	// Entersyscall must not call any function that might split/grow the
	// stack. Catch calls that might, by setting the stack guard to
	// stackPreempt and making any stack growth fatal via throwsplit.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Leave SP around for GC and traceback.
	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp
	casgstatus(gp, _Grunning, _Gsyscall)
	if staticLockRanking {
		// casgstatus may call systemstack under static lock ranking,
		// which clobbers gp.sched; re-save.
		save(pc, sp, bp)
	}
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	if trace.ok() {
		systemstack(func() {
			trace.GoSysCall()
			traceRelease(trace)
		})
		// systemstack itself clobbers gp.sched; re-save so traceback
		// during the syscall still works.
		save(pc, sp, bp)
	}

	if sched.sysmonwait.Load() {
		systemstack(entersyscall_sysmon)
		save(pc, sp, bp)
	}

	if gp.m.p.ptr().runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack.
		systemstack(runSafePointFn)
		save(pc, sp, bp)
	}

	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	pp := gp.m.p.ptr()
	pp.m = 0
	gp.m.oldp.set(pp)
	gp.m.p = 0
	atomic.Store(&pp.status, _Psyscall)
	if sched.gcwaiting.Load() {
		systemstack(entersyscall_gcwait)
		save(pc, sp, bp)
	}

	gp.m.locks--
}
4367
4368
4369
4370
4371
4372
4373
// entersyscall marks the current goroutine as about to enter a system
// call. It is the standard entry point wrapped around reentersyscall.
func entersyscall() {
	// N.B. getcallerfp cannot be written directly as an argument in the
	// call to reentersyscall because that forces spilling the other
	// arguments to the stack, which can exceed nosplit stack limits on
	// some platforms.
	fp := getcallerfp()
	reentersyscall(getcallerpc(), getcallersp(), fp)
}
4382
// entersyscall_sysmon wakes sysmon if it is sleeping, so it can observe
// the P entering a syscall and retake it if necessary.
func entersyscall_sysmon() {
	lock(&sched.lock)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
4391
// entersyscall_gcwait hands the syscall-entering M's old P to a pending
// stop-the-world: if a STW is waiting (stopwait > 0), the P is moved
// straight to _Pgcstop and the stopper is woken once all Ps are stopped.
func entersyscall_gcwait() {
	gp := getg()
	pp := gp.m.oldp.ptr()

	lock(&sched.lock)
	trace := traceAcquire()
	if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
		if trace.ok() {
			// This is a steal in the tracer's eyes: the P transitions
			// away from _Psyscall on behalf of the syscalling M.
			// Passing true marks it as a self-steal for ordering.
			trace.ProcSteal(pp, true)
			traceRelease(trace)
		}
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	} else if trace.ok() {
		traceRelease(trace)
	}
	unlock(&sched.lock)
}
4422
4423
4424
4425
// entersyscallblock is like entersyscall, but for syscalls known to
// block: it hands the P off immediately rather than leaving it in
// _Psyscall for sysmon to retake.
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // see comment in entersyscall
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++

	// Leave SP around for GC and traceback.
	pc := getcallerpc()
	sp := getcallersp()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	systemstack(entersyscallblock_handoff)

	// Resave for traceback during blocked call.
	save(getcallerpc(), getcallersp(), getcallerfp())

	gp.m.locks--
}
4467
// entersyscallblock_handoff traces the syscall and hands the released P
// to another M. Runs on the system stack.
func entersyscallblock_handoff() {
	trace := traceAcquire()
	if trace.ok() {
		trace.GoSysCall()
		traceRelease(trace)
	}
	handoffp(releasep())
}
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
// exitsyscall is called after a system call returns. It tries the fast
// path (reacquire the old P, or grab an idle one, without touching the
// scheduler lock) and resumes running directly; otherwise it falls back
// to exitsyscall0 on g0, which may block until a P is available.
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	if getcallersp() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}

	gp.waitsince = 0
	oldp := gp.m.oldp.ptr()
	gp.m.oldp = 0
	if exitsyscallfast(oldp) {
		// Fast path: we have a P again; resume execution here.
		if goroutineProfile.active {
			// Make sure gp appears in the goroutine profile before we
			// start running it again; its stack is stable now.
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}
		trace := traceAcquire()
		if trace.ok() {
			lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
			systemstack(func() {
				// Emit on the system stack: taking a user-stack trace
				// here is not safe, and GoSysExit/GoStart may do so.
				trace.GoSysExit(lostP)
				if lostP {
					// We lost the P at some point during the syscall;
					// from the tracer's perspective this G is being
					// started fresh on a new P.
					trace.GoStart()
				}
			})
		}
		// There's a cpu for us, so we can run.
		gp.m.p.ptr().syscalltick++
		// We need to cas the status and scan before resuming...
		casgstatus(gp, _Gsyscall, _Grunning)
		if trace.ok() {
			traceRelease(trace)
		}

		// Garbage collector isn't running (since we are), so okay to
		// clear syscallsp.
		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// Restore the preemption request in case we cleared it in newstack.
			gp.stackguard0 = stackPreempt
		} else {
			// Otherwise restore the real stack guard, clobbered in entersyscall.
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// Scheduling of this goroutine is disabled.
			Gosched()
		}

		return
	}

	gp.m.locks--

	// Call the scheduler; never returns to this frame directly.
	mcall(exitsyscall0)

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for the GC during
	// the syscall. Must wait until now because until gosched returns we
	// don't know for sure that the garbage collector is not running.
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
4573
4574
// exitsyscallfast attempts the lock-free syscall exit: reattach to the
// old P (still in _Psyscall) or acquire some idle P. Reports whether a
// P was obtained.
func exitsyscallfast(oldp *p) bool {
	// Freezetheworld sets stopwait but does not retake Ps.
	if sched.stopwait == freezeStopWait {
		return false
	}

	// Try to re-acquire the last P.
	trace := traceAcquire()
	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
		// There's a cpu for us, so we can run.
		wirep(oldp)
		exitsyscallfast_reacquired(trace)
		if trace.ok() {
			traceRelease(trace)
		}
		return true
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Try to get any other idle P.
	if sched.pidle != 0 {
		var ok bool
		systemstack(func() {
			ok = exitsyscallfast_pidle()
		})
		if ok {
			return true
		}
	}
	return false
}
4608
4609
4610
4611
4612
4613
// exitsyscallfast_reacquired is the exitsyscall fast path on the m with
// a re-acquired P. If the syscalltick changed, the P was stolen and
// given back during the syscall, which must be reported to the tracer
// as a steal + restart.
func exitsyscallfast_reacquired(trace traceLocker) {
	gp := getg()
	if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
		if trace.ok() {
			// The P was retaken and then handed back to us while in a
			// syscall. Emit the steal and the fresh start on the
			// system stack (stack traces from the user stack are not
			// safe here).
			systemstack(func() {
				// Denote blocking of the new syscall.
				trace.ProcSteal(gp.m.p.ptr(), true)
				trace.ProcStart()
			})
		}
		gp.m.p.ptr().syscalltick++
	}
}
4631
// exitsyscallfast_pidle tries to take a P from the idle list for a
// goroutine exiting a syscall, waking sysmon if it was waiting. Reports
// whether a P was acquired.
func exitsyscallfast_pidle() bool {
	lock(&sched.lock)
	pp, _ := pidleget(0)
	if pp != nil && sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		return true
	}
	return false
}
4646
4647
4648
4649
4650
4651
4652
// exitsyscall0 is the exitsyscall slow path, running on g0: the fast
// path failed to get a P, so gp is made runnable and either run on a
// freshly acquired idle P, queued globally, or (if locked to this
// thread) this M blocks until its P arrives.
func exitsyscall0(gp *g) {
	var trace traceLocker
	traceExitingSyscall()
	trace = traceAcquire()
	casgstatus(gp, _Gsyscall, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// Write out syscall exit eagerly.
		//
		// It's important that we write this *after* we know whether we
		// lost our P or not (which we did; we don't have one here).
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		globrunqput(gp)

		// Below, we stoplockedm if gp is locked. globrunqput releases
		// ownership of gp, so we must check if gp is locked prior to
		// committing the release by unlocking sched.lock, otherwise we
		// could race with another M transitioning gp from unlocked to
		// locked.
		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// Wait until another thread schedules gp and so m again.
		//
		// N.B. lockedm must be this M, as this G was running on this M
		// before entersyscall.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
4703
4704
4705
4706
4707
// syscall_runtime_BeforeFork prepares the runtime for a fork(2): it
// blocks signals (so the child doesn't receive them with inconsistent
// runtime state) and arms the stack guard so stack growth between fork
// and exec is caught. Linked to syscall/os packages.
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block signals during a fork, so that the child does not run a
	// signal handler before exec if a signal is sent to the process
	// group. See issue #18600.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// This function is called before fork in syscall package. Code
	// between fork and exec must not allocate memory nor even try to
	// grow the stack. Here we spoil g.stackguard0 to reliably detect
	// any attempts to grow the stack. runtime_AfterFork will undo this
	// in the parent process, but not in the child.
	gp.stackguard0 = stackFork
}
4724
4725
4726
4727
4728
// syscall_runtime_AfterFork undoes BeforeFork in the parent process:
// restores the stack guard and the signal mask.
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// See the comments in beforefork.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
4739
4740
4741
// inForkedChild is true while manipulating signal state in the child of
// a fork, so the signal code can avoid restoring runtime handlers.
var inForkedChild bool
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
// syscall_runtime_AfterForkInChild runs in the forked child before
// exec: it clears inherited signal handlers and unblocks signals. It
// must not allocate or grow the stack (see BeforeFork).
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here because
	// we are in the child process: there is only this one thread
	// running, and the parent's copy is unaffected.
	inForkedChild = true

	clearSignalHandlers()

	// When we are the child we are the only thread running, so we know
	// that nothing else has changed gp.m.sigmask.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
4770
4771
4772
4773
// pendingPreemptSignals is the number of preemption signals that have
// been sent but not yet received (used by BeforeExec on darwin/ios).
var pendingPreemptSignals atomic.Int32
4775
4776
4777
4778
// syscall_runtime_BeforeExec serializes exec against thread creation,
// and on darwin/ios drains in-flight preemption signals, which could be
// delivered to the wrong process image after exec.
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to be
	// received. See issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
4791
4792
4793
4794
// syscall_runtime_AfterExec releases the exec lock taken by BeforeExec.
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
4798
4799
// malg allocates a new g, with a stack big enough for stacksize bytes.
// A negative stacksize means to allocate no stack (used for g's whose
// stacks are provided elsewhere).
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		newg.stackguard1 = ^uintptr(0) // poison value: g0 stackguard1 is set elsewhere
		// Clear the bottom word of the stack. We record g there on
		// gsignal stack during VDSO on ARM and ARM64.
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
4815
4816
4817
4818
// newproc creates a new g running fn (the implementation of the go
// statement). The compiler turns a go statement into a call to this.
// It runs on the system stack because newproc1 must not be preempted
// while it holds a partially-initialized g.
func newproc(fn *funcval) {
	gp := getg()
	pc := getcallerpc()
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		pp := getg().m.p.ptr()
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
4833
4834
4835
4836
// newproc1 creates a new goroutine running fn, with caller information
// from callergp/callerpc. If parked is true the new goroutine starts in
// _Gwaiting with the given wait reason instead of _Grunnable; otherwise
// the caller is responsible for scheduling it. Returns the new g.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption because we hold M and P in local vars
	pp := mp.p.ptr()
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		// Publish in _Gdead so the GC scanner doesn't look at the
		// uninitialized stack.
		allgadd(newg)
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// caller's FP
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	// +PCQuantum so that the previous instruction is in the same
	// function; the goroutine "returns" into goexit.
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// Only user goroutines inherit pprof labels.
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. It should
			// include exactly the set of goroutines that were alive
			// when the profile started. Mark the new goroutine as
			// satisfied so it is not double-counted.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}

	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Assign a goid and transition out of _Gdead; grouped under one
	// trace acquisition so the tracer sees creation atomically.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	casgstatus(newg, _Gdead, status)
	if pp.goidcache == pp.goidcacheend {
		// Sched.goidgen is the last allocated id; this batch must be
		// [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. At startup
		// sched.goidgen=0, so main goroutine receives goid=1.
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// Set up race context.
	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// See note in proflabel.go on labelSync's role in
			// synchronizing with the reads in the signal handler.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	releasem(mp)

	return newg
}
4942
4943
4944
4945
// saveAncestors copies previous ancestors of the given caller g and
// includes the caller's own info in a new set of tracebacks for the
// goroutine being created (GODEBUG=tracebackancestors=N). Returns nil
// when ancestor tracking is disabled.
func saveAncestors(callergp *g) *[]ancestorInfo {
	// Copy all prior info, except for the root goroutine (goid 0).
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}
	var callerAncestors []ancestorInfo
	if callergp.ancestors != nil {
		callerAncestors = *callergp.ancestors
	}
	// Cap the list at tracebackancestors entries, dropping the oldest.
	n := int32(len(callerAncestors)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}
	ancestors := make([]ancestorInfo, n)
	copy(ancestors[1:], callerAncestors)

	var pcs [tracebackInnerFrames]uintptr
	npcs := gcallers(callergp, 0, pcs[:])
	ipcs := make([]uintptr, npcs)
	copy(ipcs, pcs[:])
	ancestors[0] = ancestorInfo{
		pcs:  ipcs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	ancestorsp := new([]ancestorInfo)
	*ancestorsp = ancestors
	return ancestorsp
}
4976
4977
4978
// gfput puts a dead goroutine on pp's local free-g cache. Goroutines
// with non-standard stack sizes have their stacks freed. If the local
// cache grows past 64 entries, half is transferred to the global cache.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// Non-standard stack size - free it.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
	}

	pp.gFree.push(gp)
	pp.gFree.n++
	if pp.gFree.n >= 64 {
		// Transfer half of the local cache to the global cache,
		// separating g's with and without stacks.
		var (
			inc      int32
			stackQ   gQueue
			noStackQ gQueue
		)
		for pp.gFree.n >= 32 {
			gp := pp.gFree.pop()
			pp.gFree.n--
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
			inc++
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		sched.gFree.n += inc
		unlock(&sched.gFree.lock)
	}
}
5019
5020
5021
// gfget gets a goroutine from pp's free-g cache, refilling from the
// global cache when the local one is empty. Returns nil if no free g is
// available. The returned g has a standard-size stack allocated.
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs to the P, preferring ones with
		// stacks already attached.
		for pp.gFree.n < 32 {
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			sched.gFree.n--
			pp.gFree.push(gp)
			pp.gFree.n++
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	pp.gFree.n--
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Deallocate old stack. We kept it in gfput because it was the
		// right size when the goroutine was put on the free list, but
		// the right size has changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or above. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// Reusing an existing stack: tell the sanitizers about the
		// fresh lifetime of that memory.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
5078
5079
// gfpurge moves all of pp's cached free Gs to the global cache (used
// when pp is being destroyed).
func gfpurge(pp *p) {
	var (
		inc      int32
		stackQ   gQueue
		noStackQ gQueue
	)
	for !pp.gFree.empty() {
		gp := pp.gFree.pop()
		pp.gFree.n--
		if gp.stack.lo == 0 {
			noStackQ.push(gp)
		} else {
			stackQ.push(gp)
		}
		inc++
	}
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(noStackQ)
	sched.gFree.stack.pushAll(stackQ)
	sched.gFree.n += inc
	unlock(&sched.gFree.lock)
}
5102
5103
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
5107
5108
5109
5110
5111
5112
// dolockOSThread is called by LockOSThread and lockOSThread below after
// they modify m.locked. It wires the current goroutine and its M to
// each other. No-op on wasm, which has no threads.
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
// LockOSThread wires the calling goroutine to its current operating
// system thread. The calling goroutine will always execute in that
// thread, and no other goroutine will execute in it, until the calling
// goroutine has made as many calls to UnlockOSThread as to
// LockOSThread. If the calling goroutine exits without unlocking the
// thread, the thread will be terminated.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from a locked goroutine, we
		// need the template thread. Start it now while we're in a
		// known-good state.
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// External lock count wrapped around: too many nested calls.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
5153
5154
// lockOSThread is the runtime-internal variant of LockOSThread; it uses
// a separate nesting counter (lockedInt) so internal locks cannot be
// undone by user calls to UnlockOSThread.
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
5159
5160
5161
5162
5163
5164
// dounlockOSThread is called by UnlockOSThread and unlockOSThread after
// they update m.locked. It unwires the goroutine from its M once both
// the internal and external lock counts reach zero.
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
// UnlockOSThread undoes an earlier call to LockOSThread. If it reduces
// the external lock count on the calling goroutine to zero, it unwires
// the goroutine from its thread. Calling it without a matching
// LockOSThread is a no-op.
func UnlockOSThread() {
	gp := getg()
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
5199
5200
// unlockOSThread is the runtime-internal variant of UnlockOSThread; an
// unbalanced call (no matching lockOSThread) is a fatal runtime bug.
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
5209
// badunlockosthread reports an unbalanced internal lock/unlock pair.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
5213
// gcount returns the (approximate) number of live user goroutines:
// everything in allg minus free-listed and system goroutines.
func gcount() int32 {
	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
	for _, pp := range allp {
		n -= pp.gFree.n
	}

	// All these variables can be changed concurrently, so the result
	// can be inconsistent. But at least the current goroutine is
	// running, so floor the answer at 1.
	if n < 1 {
		n = 1
	}
	return n
}
5227
// mcount returns the number of Ms: ever created minus freed.
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
5231
// prof holds the global CPU profiling configuration shared with the
// SIGPROF handler.
var prof struct {
	signalLock atomic.Uint32

	// Must hold signalLock to write. Reads may be lock-free, but
	// signalLock should be taken to synchronize with changes.
	hz atomic.Int32
}
5239
// Fake functions whose PCs are used by sigprof to attribute profiling
// samples that cannot be symbolized to a real Go function. Each calls
// itself so the linker keeps a distinct, nonzero-size body for it.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
5247
5248
5249
5250
5251
// sigprof is called by the SIGPROF handler, reporting a profiling tick.
// pc/sp/lr are the interrupted registers; gp is the interrupted
// goroutine (may differ from mp.curg) and mp the interrupted M. It
// collects a stack trace — choosing among cgo, libcall, VDSO, and
// ordinary Go unwind starting points — and records it in the CPU
// profile and trace.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, then profiling is not enabled for this
	// thread. We must check this to avoid a deadlock between setcpuprofilerate
	// and the call to cpuprof.add, below.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On mips{,le}/arm, 64-bit atomics are emulated with spinlocks, in
	// internal/runtime/atomic. If SIGPROF arrives while the program is
	// inside the critical section, it creates a deadlock (when writing
	// the sample it tries to take the same spinlock). Count these
	// samples as lost rather than recursing.
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic functions call into kernel helpers
			// on older ARM; treat samples landing there the same way.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	// Set a trap in case the code does allocate.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure that we are not interrupting
		// other code that is updating cgoCallers. We are running in a
		// signal handler with all signals blocked, so we don't have to
		// worry about any other code interrupting us.
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall, i.e. runtime syscall on windows.
		// Collect Go stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call, e.g. nanotime on Linux.
		// Collect Go stack that leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed. Synthesize a
		// two-frame stack attributing the sample to a fake function.
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than "etext".
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// Note: it can happen on Windows that we interrupted a system
		// thread with no g, so gp could nil. The other nil checks are
		// done out of caution, but not expected to be nil in practice.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		gprof := gp
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
5369
5370
5371
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, profiling is turned off.
func setcpuprofilerate(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption, otherwise we can be rescheduled to another
	// thread that has profiling enabled.
	gp := getg()
	gp.m.locks++

	// Stop profiler on this thread so that it is safe to lock prof.
	// If a profiling signal came in while we had prof locked, it would
	// deadlock trying to re-lock in its handler.
	setThreadCPUProfiler(0)

	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz {
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	if hz != 0 {
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
5407
5408
5409
// init initializes pp, which may be a freshly allocated p or a p
// previously destroyed by destroy, transitioning it to _Pgcstop.
func (pp *p) init(id int32) {
	pp.id = id
	pp.status = _Pgcstop
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			if mcache0 == nil {
				throw("missing mcache?")
			}
			// Use the bootstrap mcache0. Only one P will get mcache0:
			// the one with ID 0.
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0 // bootstrap
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// This P may get timers when it starts running. Set the mask here
	// since the P may not go through pidleget (notably P 0 on startup).
	timerpMask.set(id)
	// Similarly, we may not go through pidleget before this P starts
	// running if it is P 0 on startup.
	idlepMask.clear(id)
}
5445
5446
5447
5448
5449
// destroy releases all of pp's resources and transitions it to _Pdead.
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move all runnable goroutines to the global queue.
	for pp.runqhead != pp.runqtail {
		// Pop from tail of local queue.
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		// Push onto head of global queue.
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move all timers to the local P.
	getg().m.p.ptr().timers.take(&pp.timers)

	// Flush p's write barrier buffer.
	if gcphase != _GCoff {
		wbBufFlush1(pp)
		pp.gcw.dispose()
	}
	for i := range pp.sudogbuf {
		pp.sudogbuf[i] = nil
	}
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	for j := range pp.deferpoolbuf {
		pp.deferpoolbuf[j] = nil
	}
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		for i := 0; i < pp.mspancache.len; i++ {
			// Safe to call since the world is stopped.
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil
	gfpurge(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector code uses a callback to fetch the proc
			// context, so temporarily make this P current so the
			// callback sees the right context while we end it.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0
	pp.status = _Pdead
}
5519
5520
5521
5522
5523
5524
5525
5526
5527
// procresize changes the number of processors to nprocs (GOMAXPROCS).
// sched.lock must be held and the world must be stopped. Returns the
// list of Ps with local work that the caller must start; the caller's
// own P (if any) is wired to the current M and not on the list.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// Update statistics: accumulate P-seconds under the old setting.
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	maskWords := (nprocs + 31) / 32

	// Grow allp and the per-P bitmasks if necessary.
	if nprocs > int32(len(allp)) {
		// Synchronize with retake, which could be running concurrently
		// since it doesn't run on a P.
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to allp's cap so we never lose old
			// allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		if maskWords <= int32(cap(idlepMask)) {
			idlepMask = idlepMask[:maskWords]
			timerpMask = timerpMask[:maskWords]
		} else {
			nidlepMask := make([]uint32, maskWords)
			// No need to copy beyond len, old Ps are irrelevant.
			copy(nidlepMask, idlepMask)
			idlepMask = nidlepMask

			ntimerpMask := make([]uint32, maskWords)
			copy(ntimerpMask, timerpMask)
			timerpMask = ntimerpMask
		}
		unlock(&allpLock)
	}

	// Initialize new Ps.
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// Continue to use the current P.
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// Release the current P and acquire allp[0]. We must do this
		// before destroying our current P because p.destroy itself has
		// write barriers, so we need to do that from a valid P.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// Pretend that we were descheduled and then scheduled
				// again to keep the trace consistent.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// g.m.p is now set, so we no longer need mcache0 for bootstrapping.
	mcache0 = nil

	// Release resources from unused Ps.
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
		// Can't free P itself because it can be referenced by an M in
		// a syscall.
	}

	// Trim allp and the masks back down.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask[:maskWords]
		timerpMask = timerpMask[:maskWords]
		unlock(&allpLock)
	}

	// Partition remaining Ps: idle ones go on the idle list, Ps with
	// local work are linked together and returned to the caller.
	var runnablePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			continue
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pidleput(pp, now)
		} else {
			pp.m.set(mget())
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}
	stealOrder.reset(uint32(nprocs))
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the limiter that the amount of procs has changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
5670
5671
5672
5673
5674
5675
5676
// acquirep associates pp with the current M and marks it running,
// performing the deferred mcache sweep preparation and emitting the
// ProcStart trace event.
func acquirep(pp *p) {
	// Do the part that isn't allowed to have write barriers.
	wirep(pp)

	// Have p; write barriers now allowed.

	// Perform deferred mcache flush before this P can allocate from a
	// potentially stale mcache.
	pp.mcache.prepareForSweep()

	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
5693
5694
5695
5696
5697
5698
5699
// wirep is the first step of acquirep, which actually associates the
// current M with pp. It is broken out so this part can run without
// write barriers (the M has no P while it executes).
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// Call on the systemstack to avoid a nosplit overflow build
		// failure on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		// Call on the systemstack to avoid a nosplit overflow build
		// failure on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
5726
5727
// releasep disassociates the current M from its P, emitting a ProcStop
// trace event, and returns the released P.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
5736
5737
// releasepNoTrace is like releasep but does not emit a trace event.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}
	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
5754
// incidlelocked adjusts the count of locked Ms that are idle/blocked
// (v is +1 or -1). When the count increases a thread just went idle,
// so deadlock detection is re-run while sched.lock is still held.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
5763
5764
5765
5766
// checkdead reports a fatal error if all goroutines are blocked
// ("deadlock"). It must be called with sched.lock held; it unlocks
// before throwing/fatal so the crash path can take the lock itself.
func checkdead() {
	assertLockHeld(&sched.lock)

	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines; the calling program may still
	// be running.
	if islibrary || isarchive {
		return
	}

	// While panicking, goroutines may be stopped in arbitrary states;
	// don't report a spurious deadlock.
	if panicking.Load() > 0 {
		return
	}

	// run0 is the baseline number of running Ms that is still "dead":
	// without cgo, an extra M (if any) counts as one such thread —
	// NOTE(review): presumably the Windows syscall.NewCallback case;
	// the original explanatory comment is stripped in this view.
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	// Ms actually running user work = all Ms minus idle, idle-locked,
	// and system Ms (e.g. sysmon).
	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// Scan all goroutines: a runnable/running/syscall G with no M to
	// run it is a scheduler bug; waiting/preempted Gs count toward
	// grunning (they could in principle be resumed).
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	// No user goroutines at all: main must have called runtime.Goexit.
	if grunning == 0 {
		unlock(&sched.lock)
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Under faketime (playground), jump the clock forward to the next
	// timer and wake an M to run it instead of declaring deadlock.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			// Start an M to steal the timer.
			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// Mark the M spinning explicitly before waking it so the
			// accounting is consistent when it starts looking for work.
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// Pending timers will eventually make some goroutine runnable;
	// that is not a deadlock.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock)
	fatal("all goroutines are asleep - deadlock!")
}
5868
5869
5870
5871
5872
5873
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run (see sysmon's gcTriggerTime check).
var forcegcperiod int64 = 2 * 60 * 1e9

// needSysmonWorkaround is true if the NetBSD timer-wakeup workaround
// in sysmon is needed (see the GOOS == "netbsd" branch there).
var needSysmonWorkaround bool = false

// haveSysmon indicates whether there is sysmon thread support.
// There are no threads on wasm, so no sysmon there.
const haveSysmon = GOARCH != "wasm"
5884
5885
5886
5887
// sysmon is the scheduler monitor, running forever on a dedicated M
// without a P. It periodically: polls the network if nobody else has,
// retakes Ps blocked in syscalls, preempts long-running Gs (via
// retake), wakes the scavenger, forces GCs after forcegcperiod, and
// prints schedtrace output.
func sysmon() {
	// Register as a system M and re-run deadlock detection, since
	// system Ms are excluded from the running-M count.
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	lasttrace := int64(0)
	idle := 0 // how many consecutive cycles found nothing to do
	delay := uint32(0)

	for {
		// Adaptive sleep: 20us while busy, doubling after 50 idle
		// cycles, capped at 10ms.
		if idle == 0 {
			delay = 20
		} else if idle > 50 {
			delay *= 2
		}
		if delay > 10*1000 {
			delay = 10 * 1000
		}
		usleep(delay)

		// Deep sleep: if schedtrace is off and either the world is
		// stopping for GC or all Ps are idle, park on sysmonnote until
		// the next timer (or half of forcegcperiod) instead of
		// spinning. A wake caused by syscall activity resets the
		// idle/delay backoff, betting that more syscall retaking will
		// be needed soon.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			// Re-check under the lock.
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)
					// Keep the wake-up period small enough for
					// the forced-GC sampling to stay correct.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		lock(&sched.sysmonlock)
		// Update now in case we blocked on sysmonnote or spent a long
		// time blocked on the locks above.
		now = nanotime()

		// Trigger libc interceptors if needed.
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}

		// Poll the network if it hasn't been polled for more than 10ms.
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking
			if !list.empty() {
				// Decrement the idle-locked M count (pretending one
				// more M is running) around injectglist. Otherwise
				// another M returning from a syscall could observe no
				// running Ms and no work and falsely report deadlock.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}
		if GOOS == "netbsd" && needSysmonWorkaround {
			// netpoll normally waits for timer expiration, but a
			// NetBSD kernel bug can miss netpollBreak wake-ups,
			// delaying timer servicing indefinitely. If a timer is
			// overdue, start an M to handle it.
			// See golang.org/issue/42515.
			if next := timeSleepUntil(); next < now {
				startm(nil, false, false)
			}
		}
		if scavenger.sysmonWake.Load() != 0 {
			// Kick the scavenger awake if someone requested it.
			scavenger.wake()
		}
		// Retake Ps blocked in syscalls and preempt long-running Gs.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// Check if we need to force a GC.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
6034
// sysmontick records the tick counters of a P as last observed by
// sysmon, and when they were observed, so retake can tell how long the
// P has been in the same scheduling state or syscall.
type sysmontick struct {
	schedtick   uint32 // pp.schedtick value last seen by retake
	syscalltick uint32 // pp.syscalltick value last seen by retake
	schedwhen   int64  // time schedtick was last observed to change
	syscallwhen int64  // time syscalltick was last observed to change
}
6041
6042
6043
// forcePreemptNS is the time slice given to a G before it is preempted (10ms).
const forcePreemptNS = 10 * 1000 * 1000
6045
// retake retakes Ps that have been blocked in system calls for at
// least one sysmon tick and preempts Gs that have run for longer than
// forcePreemptNS. Returns the number of Ps retaken from syscalls.
func retake(now int64) uint32 {
	n := 0
	// Prevent allp slice changes while we iterate.
	lock(&allpLock)
	// We can't use a range loop over allp because we may temporarily
	// drop allpLock below, so allp must be re-read each iteration.
	for i := 0; i < len(allp); i++ {
		pp := allp[i]
		if pp == nil {
			// Possible if procresize has grown allp but not yet
			// created new Ps.
			continue
		}
		pd := &pp.sysmontick
		s := pp.status
		sysretake := false
		if s == _Prunning || s == _Psyscall {
			// Preempt G if it's running on the same schedtick for
			// too long: record when the tick last changed, and if it
			// hasn't changed within forcePreemptNS, request preemption.
			t := int64(pp.schedtick)
			if int64(pd.schedtick) != t {
				pd.schedtick = uint32(t)
				pd.schedwhen = now
			} else if pd.schedwhen+forcePreemptNS <= now {
				preemptone(pp)
				// In case of syscall preemptone() doesn't work,
				// because there is no M wired to the P; fall through
				// to the retake path below.
				sysretake = true
			}
		}
		if s == _Psyscall {
			// Retake the P if it has been in a syscall for more than
			// one sysmon tick (at least 20us).
			t := int64(pp.syscalltick)
			if !sysretake && int64(pd.syscalltick) != t {
				pd.syscalltick = uint32(t)
				pd.syscallwhen = now
				continue
			}
			// Don't retake if there's no other work to do, but do
			// retake eventually so idle syscall Ps don't keep sysmon
			// out of deep sleep.
			if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
				continue
			}
			// Drop allpLock so we can take sched.lock (via
			// incidlelocked/handoffp) without violating lock order.
			unlock(&allpLock)
			// Decrement the idle-locked M count (pretending one more M
			// is running) before the CAS; otherwise the M we retake
			// from could exit its syscall, go idle, and report a
			// spurious deadlock.
			incidlelocked(-1)
			trace := traceAcquire()
			if atomic.Cas(&pp.status, s, _Pidle) {
				if trace.ok() {
					trace.ProcSteal(pp, false)
					traceRelease(trace)
				}
				n++
				pp.syscalltick++
				handoffp(pp)
			} else if trace.ok() {
				traceRelease(trace)
			}
			incidlelocked(1)
			lock(&allpLock)
		}
	}
	unlock(&allpLock)
	return uint32(n)
}
6120
6121
6122
6123
6124
6125
6126 func preemptall() bool {
6127 res := false
6128 for _, pp := range allp {
6129 if pp.status != _Prunning {
6130 continue
6131 }
6132 if preemptone(pp) {
6133 res = true
6134 }
6135 }
6136 return res
6137 }
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
// preemptone tells the goroutine running on P pp to stop.
//
// This function is purely best-effort: it can fail to inform the
// goroutine, or inform the wrong one, and the goroutine may ignore the
// request. No lock needs to be held. Reports whether a preemption
// request was issued.
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	// Can't preempt a P with no M, or our own M.
	if mp == nil || mp == getg().m {
		return false
	}
	gp := mp.curg
	if gp == nil || gp == mp.g0 {
		return false
	}

	gp.preempt = true

	// Every call in a goroutine checks for stack overflow by comparing
	// the current stack pointer to gp.stackguard0. Setting
	// stackguard0 to stackPreempt folds the preemption request into
	// the normal stack-overflow check.
	gp.stackguard0 = stackPreempt

	// Request an async (signal-based) preemption of this P as well,
	// unless disabled.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}

	return true
}
6175
// starttime is the time of the first schedtrace call; SCHED lines
// report timestamps relative to it.
var starttime int64
6177
// schedtrace prints a one-line summary of scheduler state, plus per-P,
// per-M and per-G detail lines when detailed is true. Called from
// sysmon when GODEBUG=schedtrace is set.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}
	// Even holding sched.lock, most of this data can change
	// concurrently (e.g. pp.m can go nil between the read and the
	// deref), so read defensively through locals.
	for i, pp := range allp {
		mp := pp.m.ptr()
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			// In non-detailed mode, format the per-P run queue
			// lengths as a bracketed list on one line.
			print(" ")
			if i == 0 {
				print("[")
			}
			print(t - h)
			if i == len(allp)-1 {
				print("]\n")
			}
		}
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// One line per M.
	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print(" M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	// One line per G.
	forEachG(func(gp *g) {
		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
6263
6264
6265
6266
6267
6268
// schedEnableUser enables or disables the scheduling of user
// goroutines.
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling them.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		// Release the goroutines parked while disabled onto the
		// global run queue, then start Ms for them while there are
		// idle Ps.
		n := sched.disable.n
		sched.disable.n = 0
		globrunqputbatch(&sched.disable.runnable, n)
		unlock(&sched.lock)
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
6288
6289
6290
6291
6292
6293 func schedEnabled(gp *g) bool {
6294 assertLockHeld(&sched.lock)
6295
6296 if sched.disable.user {
6297 return isSystemGoroutine(gp, true)
6298 }
6299 return true
6300 }
6301
6302
6303
6304
6305
6306
// mput puts mp on the idle-M list and re-runs deadlock detection,
// since a thread just went idle.
//
// sched.lock must be held. May run during STW — NOTE(review): the
// original //go: write-barrier directives are stripped in this view.
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	mp.schedlink = sched.midle
	sched.midle.set(mp)
	sched.nmidle++
	checkdead()
}
6315
6316
6317
6318
6319
6320
6321 func mget() *m {
6322 assertLockHeld(&sched.lock)
6323
6324 mp := sched.midle.ptr()
6325 if mp != nil {
6326 sched.midle = mp.schedlink
6327 sched.nmidle--
6328 }
6329 return mp
6330 }
6331
6332
6333
6334
6335
6336
6337 func globrunqput(gp *g) {
6338 assertLockHeld(&sched.lock)
6339
6340 sched.runq.pushBack(gp)
6341 sched.runqsize++
6342 }
6343
6344
6345
6346
6347
6348
6349 func globrunqputhead(gp *g) {
6350 assertLockHeld(&sched.lock)
6351
6352 sched.runq.push(gp)
6353 sched.runqsize++
6354 }
6355
6356
6357
6358
6359
6360
6361
6362 func globrunqputbatch(batch *gQueue, n int32) {
6363 assertLockHeld(&sched.lock)
6364
6365 sched.runq.pushBackAll(*batch)
6366 sched.runqsize += n
6367 *batch = gQueue{}
6368 }
6369
6370
6371
6372 func globrunqget(pp *p, max int32) *g {
6373 assertLockHeld(&sched.lock)
6374
6375 if sched.runqsize == 0 {
6376 return nil
6377 }
6378
6379 n := sched.runqsize/gomaxprocs + 1
6380 if n > sched.runqsize {
6381 n = sched.runqsize
6382 }
6383 if max > 0 && n > max {
6384 n = max
6385 }
6386 if n > int32(len(pp.runq))/2 {
6387 n = int32(len(pp.runq)) / 2
6388 }
6389
6390 sched.runqsize -= n
6391
6392 gp := sched.runq.pop()
6393 n--
6394 for ; n > 0; n-- {
6395 gp1 := sched.runq.pop()
6396 runqput(pp, gp1, false)
6397 }
6398 return gp
6399 }
6400
6401
// pMask is an atomic bitmask indexed by P id (one bit per P).
type pMask []uint32
6403
6404
6405 func (p pMask) read(id uint32) bool {
6406 word := id / 32
6407 mask := uint32(1) << (id % 32)
6408 return (atomic.Load(&p[word]) & mask) != 0
6409 }
6410
6411
6412 func (p pMask) set(id int32) {
6413 word := id / 32
6414 mask := uint32(1) << (id % 32)
6415 atomic.Or(&p[word], mask)
6416 }
6417
6418
6419 func (p pMask) clear(id int32) {
6420 word := id / 32
6421 mask := uint32(1) << (id % 32)
6422 atomic.And(&p[word], ^mask)
6423 }
6424
6425
6426
6427
6428
6429
6430
6431
6432
6433
6434
6435
// pidleput puts pp on the idle-P list and updates the idle/timer P
// masks accordingly.
//
// now must be a relatively recent result of nanotime() or zero;
// returns now, or the current time if now was zero.
//
// sched.lock must be held. pp must not have a non-empty run queue.
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// Keep the masks in sync: a P with no timers need not be checked
	// by timer wakers; mark the P idle so wakers can find it without
	// taking sched.lock.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	// Track idle time for the GC CPU limiter.
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
6457
6458
6459
6460
6461
6462
6463
6464
// pidleget removes and returns a P from the idle-P list, or nil if
// the list is empty, updating the idle/timer P masks accordingly.
//
// now must be a relatively recent result of nanotime() or zero;
// returns the updated now.
//
// sched.lock must be held.
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		// Timer may get added at any time now.
		if now == 0 {
			now = nanotime()
		}
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		// Close out the idle interval for the GC CPU limiter.
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
6493 func pidlegetSpinning(now int64) (*p, int64) {
6494 assertLockHeld(&sched.lock)
6495
6496 pp, now := pidleget(now)
6497 if pp == nil {
6498
6499
6500
6501 sched.needspinning.Store(1)
6502 return nil, now
6503 }
6504
6505 return pp, now
6506 }
6507
6508
6509
// runqempty reports whether pp has no Gs on its local run queue
// (including runnext). It never returns true spuriously.
func runqempty(pp *p) bool {
	// Defends against a race: simply observing runqhead == runqtail
	// and then runnext == 0 is not enough, because runqput can move a
	// G from runnext into the ring in between the two observations.
	// Re-reading runqtail after reading runnext detects that a put
	// happened; loop until a consistent snapshot is seen.
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}
6524
6525
6526
6527
6528
6529
6530
6531
6532
6533
// randomizeScheduler perturbs scheduling order when the race detector
// is enabled, to expose tests to more orderings.
const randomizeScheduler = raceenabled
6535
6536
6537
6538
6539
6540
// runqput tries to put gp on the local runnable queue.
// If next is false, it adds gp to the tail of the ring.
// If next is true, it puts gp in the pp.runnext slot.
// If the local queue is full, gp is put on the global queue.
// Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// A runnext goroutine shares the current goroutine's time
		// slice (inheritTime). Without sysmon there is nothing to
		// force preemption, so two goroutines ping-ponging through
		// runnext could starve everything else; disable runnext on
		// such platforms.
		next = false
	}
	if randomizeScheduler && next && randn(2) == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := pp.runnext
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the displaced runnext G out to the regular queue.
		gp = oldnext.ptr()
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item visible to consumers
		return
	}
	if runqputslow(pp, gp, h, t) {
		return
	}
	// The queue is not full now, so the put above must succeed.
	goto retry
}
6584
6585
6586
// runqputslow puts gp, along with half of pp's local run queue, onto
// the global runnable queue. Returns false if the half-batch could not
// be claimed (concurrent stealers moved runqhead); the caller retries.
// Executed only by the owner P.
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// First, grab half of the goroutines from the local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		// Fisher-Yates shuffle of the batch under the race detector.
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines into a queue.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}
	var q gQueue
	q.head.set(batch[0])
	q.tail.set(batch[n])

	// Now put the batch on the global queue.
	lock(&sched.lock)
	globrunqputbatch(&q, int32(n+1))
	unlock(&sched.lock)
	return true
}
6625
6626
6627
6628
6629
// runqputbatch tries to put all the goroutines on q onto pp's local
// runnable queue; any that don't fit go to the global queue (briefly
// taking sched.lock). qsize is the number of goroutines on q.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue, qsize int) {
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	n := uint32(0)
	// Fill the local ring as far as it goes.
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}
	qsize -= int(n)

	if randomizeScheduler {
		// Shuffle the newly added entries (they are not yet visible
		// to consumers, so this is safe).
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	atomic.StoreRel(&pp.runqtail, t) // store-release, publishes the new entries
	if !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(q, int32(qsize))
		unlock(&sched.lock)
	}
}
6659
6660
6661
6662
6663
// runqget gets a goroutine from the local runnable queue.
// Executed only by the owner P.
// inheritTime is true if gp came from runnext and should inherit the
// remaining time in the current time slice.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// If runnext is non-0 and the CAS fails, it could only have been
	// stolen by another P, because other Ps can only race runnext to
	// 0 while only the owner P sets it non-0. Hence no retry is
	// needed if the CAS fails.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}
6686
6687
6688
// runqdrain removes all goroutines from pp's local runnable queue
// (including runnext) and returns them, in queue order, along with
// their count. Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue, n uint32) {
	// Claim runnext first; as in runqget, a failed CAS means it was
	// stolen and no retry is needed.
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
		n++
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
		goto retry
	}

	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
		goto retry
	}

	// Read the slots only after the CAS claims [h, h+qn).
	// NOTE(review): the original rationale comment is stripped here;
	// the visible ordering suggests reading before the claim could
	// observe slots concurrently reused — confirm against upstream.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
		n++
	}
	return
}
6725
6726
6727
6728
6729
// runqgrab grabs a batch of goroutines (half of pp's queue, or its
// runnext) from pp's runnable queue into batch, a ring buffer starting
// at batchHead. Returns the number of goroutines grabbed.
// Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		n = n - n/2 // take the larger half
		if n == 0 {
			if stealRunNextG {
				// Try to steal from pp.runnext.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						// Back off briefly so we don't steal runnext
						// just as pp is about to run it. The important
						// case is a G that ready()s another G and then
						// almost immediately blocks: give pp a chance
						// to schedule its runnext instead of thrashing
						// Gs between Ps.
						if !osHasLowResTimer {
							usleep(3)
						} else {
							// On some platforms the system timer
							// granularity is 1-15ms, far too coarse
							// for this optimization, so just yield.
							osyield()
						}
					}
					if !pp.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}
6781
6782
6783
6784
// runqsteal steals half of the elements from the local runnable queue
// of p2 and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements, or nil if stealing failed.
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	// Return the last grabbed G directly to run; publish the rest by
	// advancing runqtail.
	n--
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the items visible to consumers
	return gp
}
6803
6804
6805
// A gQueue is a FIFO queue of Gs linked through g.schedlink.
// A G can be on only one gQueue or gList at a time.
type gQueue struct {
	head guintptr
	tail guintptr
}
6810
6811
// empty reports whether q is empty.
func (q *gQueue) empty() bool {
	return q.head == 0
}
6815
6816
6817 func (q *gQueue) push(gp *g) {
6818 gp.schedlink = q.head
6819 q.head.set(gp)
6820 if q.tail == 0 {
6821 q.tail.set(gp)
6822 }
6823 }
6824
6825
6826 func (q *gQueue) pushBack(gp *g) {
6827 gp.schedlink = 0
6828 if q.tail != 0 {
6829 q.tail.ptr().schedlink.set(gp)
6830 } else {
6831 q.head.set(gp)
6832 }
6833 q.tail.set(gp)
6834 }
6835
6836
6837
6838 func (q *gQueue) pushBackAll(q2 gQueue) {
6839 if q2.tail == 0 {
6840 return
6841 }
6842 q2.tail.ptr().schedlink = 0
6843 if q.tail != 0 {
6844 q.tail.ptr().schedlink = q2.head
6845 } else {
6846 q.head = q2.head
6847 }
6848 q.tail = q2.tail
6849 }
6850
6851
6852
6853 func (q *gQueue) pop() *g {
6854 gp := q.head.ptr()
6855 if gp != nil {
6856 q.head = gp.schedlink
6857 if q.head == 0 {
6858 q.tail = 0
6859 }
6860 }
6861 return gp
6862 }
6863
6864
6865 func (q *gQueue) popList() gList {
6866 stack := gList{q.head}
6867 *q = gQueue{}
6868 return stack
6869 }
6870
6871
6872
// A gList is a singly-linked LIFO list of Gs linked through
// g.schedlink. A G can be on only one gQueue or gList at a time.
type gList struct {
	head guintptr
}
6876
6877
// empty reports whether l is empty.
func (l *gList) empty() bool {
	return l.head == 0
}
6881
6882
// push adds gp to the head of l.
func (l *gList) push(gp *g) {
	gp.schedlink = l.head
	l.head.set(gp)
}
6887
6888
6889 func (l *gList) pushAll(q gQueue) {
6890 if !q.empty() {
6891 q.tail.ptr().schedlink = l.head
6892 l.head = q.head
6893 }
6894 }
6895
6896
6897 func (l *gList) pop() *g {
6898 gp := l.head.ptr()
6899 if gp != nil {
6900 l.head = gp.schedlink
6901 }
6902 return gp
6903 }
6904
6905
6906 func setMaxThreads(in int) (out int) {
6907 lock(&sched.lock)
6908 out = int(sched.maxmcount)
6909 if in > 0x7fffffff {
6910 sched.maxmcount = 0x7fffffff
6911 } else {
6912 sched.maxmcount = int32(in)
6913 }
6914 checkmcount()
6915 unlock(&sched.lock)
6916 return
6917 }
6918
6919
6920 func procPin() int {
6921 gp := getg()
6922 mp := gp.m
6923
6924 mp.locks++
6925 return int(mp.p.ptr().id)
6926 }
6927
6928
6929 func procUnpin() {
6930 gp := getg()
6931 gp.m.locks--
6932 }
6933
6934
6935
// sync_runtime_procPin backs sync.runtime_procPin — NOTE(review): by
// naming convention this is exposed via go:linkname; the directive
// comments are stripped in this view.
func sync_runtime_procPin() int {
	return procPin()
}
6939
6940
6941
// sync_runtime_procUnpin backs sync.runtime_procUnpin — NOTE(review):
// by naming convention this is exposed via go:linkname; the directive
// comments are stripped in this view.
func sync_runtime_procUnpin() {
	procUnpin()
}
6945
6946
6947
// sync_atomic_runtime_procPin backs sync/atomic's runtime_procPin —
// NOTE(review): by naming convention exposed via go:linkname; the
// directive comments are stripped in this view.
func sync_atomic_runtime_procPin() int {
	return procPin()
}
6951
6952
6953
// sync_atomic_runtime_procUnpin backs sync/atomic's runtime_procUnpin —
// NOTE(review): by naming convention exposed via go:linkname; the
// directive comments are stripped in this view.
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}
6957
6958
6959
6960
6961
// sync_runtime_canSpin reports whether active spinning for sync.Mutex
// is worthwhile at spin iteration i.
//
// sync.Mutex is cooperative, so be conservative: spin only a few
// times, only on a multicore machine with at least one other running
// P, and only if the local run queue is empty (there can be pending
// work on the global queue or other Ps otherwise).
func sync_runtime_canSpin(i int) bool {
	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}
6976
6977
6978
// sync_runtime_doSpin performs one round of active spinning
// (active_spin_cnt iterations of procyield) for sync.Mutex.
func sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
6982
// stealOrder determines the pseudo-random order in which Ps are
// visited during work stealing; it is reset when GOMAXPROCS changes
// (see procresize).
var stealOrder randomOrder
6984
6985
6986
6987
6988
// randomOrder enumerates [0, count) in a pseudo-random order without
// repetition: walks use a stride coprime with count, so every position
// is visited exactly once.
type randomOrder struct {
	count    uint32
	coprimes []uint32 // all values in [1, count] coprime with count
}
6993
// randomEnum is a single enumeration over [0, count) produced by
// randomOrder.start.
type randomEnum struct {
	i     uint32 // number of positions visited so far
	count uint32
	pos   uint32 // current position
	inc   uint32 // stride, coprime with count
}
7000
7001 func (ord *randomOrder) reset(count uint32) {
7002 ord.count = count
7003 ord.coprimes = ord.coprimes[:0]
7004 for i := uint32(1); i <= count; i++ {
7005 if gcd(i, count) == 1 {
7006 ord.coprimes = append(ord.coprimes, i)
7007 }
7008 }
7009 }
7010
7011 func (ord *randomOrder) start(i uint32) randomEnum {
7012 return randomEnum{
7013 count: ord.count,
7014 pos: i % ord.count,
7015 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7016 }
7017 }
7018
// done reports whether the enumeration has visited all count positions.
func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}
7022
7023 func (enum *randomEnum) next() {
7024 enum.i++
7025 enum.pos = (enum.pos + enum.inc) % enum.count
7026 }
7027
// position returns the current position of the enumeration.
func (enum *randomEnum) position() uint32 {
	return enum.pos
}
7031
// gcd returns the greatest common divisor of a and b, computed with
// the iterative Euclidean algorithm. gcd(x, 0) == x, so gcd(0, 0) == 0.
func gcd(a, b uint32) uint32 {
	for b != 0 {
		r := a % b
		a, b = b, r
	}
	return a
}
7038
7039
7040
// An initTask represents the set of package initialization functions
// for a package, laid down by the linker: the two uint32 fields are
// followed in memory by nfns function pointers (see doInit1, which
// indexes 8 bytes past the struct).
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
	nfns  uint32 // number of init functions that follow
}
7046
7047
7048
// inittrace holds init-time statistics, collected when
// GODEBUG=inittrace=1 sets inittrace.active (see main).
var inittrace tracestat
7050
// tracestat accumulates statistics printed by init tracing.
type tracestat struct {
	active bool   // init tracing activation status
	id     uint64 // init goroutine id
	allocs uint64 // heap allocations
	bytes  uint64 // heap allocated bytes
}
7057
7058 func doInit(ts []*initTask) {
7059 for _, t := range ts {
7060 doInit1(t)
7061 }
7062 }
7063
// doInit1 runs the init functions of a single initTask, guarding
// against recursive initialization and optionally collecting
// timing/allocation statistics for GODEBUG=inittrace.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Load stats non-atomically: inittrace is only updated
			// by this (the init) goroutine.
			before = inittrace
		}

		if t.nfns == 0 {
			// The linker should have pruned empty init tasks.
			throw("inittask with no functions")
		}

		// The function pointers are laid out immediately after the
		// two uint32 fields (8 bytes) of the initTask.
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Load stats non-atomically (same reasoning as above).
			after := inittrace

			// Attribute the task to the package of its first init
			// function.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
7116
View as plain text