Source file: src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/sys"
15 "internal/stringslite"
16 "unsafe"
17 )
18
19
20 var modinfo string
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
126 var runtime_inittasks []*initTask
127
128
129
130
131
132 var main_init_done chan bool
133
134
135 func main_main()
136
137
138 var mainStarted bool
139
140
141 var runtimeInitTime int64
142
143
144 var initSigmask sigset
145
146
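// main is the function run by the main goroutine, created during bootstrap.
// It finishes runtime setup (stack size limits, the sysmon thread, runtime
// package init tasks), runs all package init functions, calls the user's
// main.main, and finally exits the process with status 0.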
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 if GOARCH == "wasm" {
270
271
272
273
274
275
276
277 pause(sys.GetCallerSP() - 16)
278 panic("unreachable")
279 }
280 return
281 }
282 fn := main_main
283 fn()
284
285 exitHooksRun := false
286 if raceenabled {
287 runExitHooks(0)
288 exitHooksRun = true
289 racefini()
290 }
291
292
293
294
295
296
297
298
299 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
300 runExitHooks(0)
301 exitHooksRun = true
302 lsandoleakcheck()
303 }
304
305
306
307
308
309 if runningPanicDefers.Load() != 0 {
310
311 for c := 0; c < 1000; c++ {
312 if runningPanicDefers.Load() == 0 {
313 break
314 }
315 Gosched()
316 }
317 }
318 if panicking.Load() != 0 {
319 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
320 }
321 if !exitHooksRun {
322 runExitHooks(0)
323 }
324
325 exit(0)
326 for {
327 var x *int32
328 *x = 0
329 }
330 }
331
332
333
334
335 func os_beforeExit(exitCode int) {
336 runExitHooks(exitCode)
337 if exitCode == 0 && raceenabled {
338 racefini()
339 }
340
341
342 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
343 lsandoleakcheck()
344 }
345 }
346
347 func init() {
348 exithook.Gosched = Gosched
349 exithook.Goid = func() uint64 { return getg().goid }
350 exithook.Throw = throw
351 }
352
353 func runExitHooks(code int) {
354 exithook.Run(code)
355 }
356
357
358 func init() {
359 go forcegchelper()
360 }
361
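// forcegchelper is the goroutine behind forced periodic GCs. It parks on
// forcegc.lock and, each time it is woken (e.g. by sysmon when too much time
// has passed since the last collection), starts a time-triggered GC cycle
// and parks again.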
362 func forcegchelper() {
363 forcegc.g = getg()
364 lockInit(&forcegc.lock, lockRankForcegc)
365 for {
366 lock(&forcegc.lock)
367 if forcegc.idle.Load() {
368 throw("forcegc: phase error")
369 }
370 forcegc.idle.Store(true)
371 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
372
373 if debug.gctrace > 0 {
374 println("GC forced")
375 }
376
377 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
378 }
379 }
380
381
382
383
384
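// Gosched yields the processor, allowing other goroutines to run. It does
// not suspend the current goroutine, so execution resumes automatically.
//
// An illustrative use (a sketch, not code from this file; processItem is a
// hypothetical helper) is yielding inside a long CPU-bound loop:
//
//	for i, item := range items {
//		processItem(item)
//		if i%1024 == 0 {
//			runtime.Gosched() // let other goroutines run
//		}
//	}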
385 func Gosched() {
386 checkTimeouts()
387 mcall(gosched_m)
388 }
389
390
391
392
393
394 func goschedguarded() {
395 mcall(goschedguarded_m)
396 }
397
398
399
400
401
402
403 func goschedIfBusy() {
404 gp := getg()
405
406
407 if !gp.preempt && sched.npidle.Load() > 0 {
408 return
409 }
410 mcall(gosched_m)
411 }
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
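// gopark puts the current goroutine into a waiting state and switches to the
// scheduler. If unlockf is non-nil it is called (on the system stack, after
// the goroutine is parked) with the goroutine and lock; if it returns false
// the goroutine is made runnable again immediately. reason and the trace
// arguments are recorded for debugging and tracing. The goroutine must later
// be made runnable again, typically via goready from another goroutine.
//
// Illustrative pairing (a sketch, not code from this file; l, reason,
// traceReason, and gp are hypothetical):
//
//	gopark(parkunlock_c, unsafe.Pointer(&l), reason, traceReason, 1) // block here
//	...
//	goready(gp, 1) // later, from another goroutine: make gp runnable again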
441 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
442 if reason != waitReasonSleep {
443 checkTimeouts()
444 }
445 mp := acquirem()
446 gp := mp.curg
447 status := readgstatus(gp)
448 if status != _Grunning && status != _Gscanrunning {
449 throw("gopark: bad g status")
450 }
451 mp.waitlock = lock
452 mp.waitunlockf = unlockf
453 gp.waitreason = reason
454 mp.waitTraceBlockReason = traceReason
455 mp.waitTraceSkip = traceskip
456 releasem(mp)
457
458 mcall(park_m)
459 }
460
461
462
463 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
464 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
465 }
466
467
468
469
470
471
472
473
474
475
476
477 func goready(gp *g, traceskip int) {
478 systemstack(func() {
479 ready(gp, traceskip, true)
480 })
481 }
482
483
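// acquireSudog returns a sudog for use by the current goroutine. It takes
// one from the per-P cache, refilling that cache from the central list
// (under sched.sudoglock) when it is empty; acquirem pins the M (and its P)
// so the per-P cache cannot change underneath us while it is manipulated.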
484 func acquireSudog() *sudog {
485
486
487
488
489
490
491
492
493 mp := acquirem()
494 pp := mp.p.ptr()
495 if len(pp.sudogcache) == 0 {
496 lock(&sched.sudoglock)
497
498 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
499 s := sched.sudogcache
500 sched.sudogcache = s.next
501 s.next = nil
502 pp.sudogcache = append(pp.sudogcache, s)
503 }
504 unlock(&sched.sudoglock)
505
506 if len(pp.sudogcache) == 0 {
507 pp.sudogcache = append(pp.sudogcache, new(sudog))
508 }
509 }
510 n := len(pp.sudogcache)
511 s := pp.sudogcache[n-1]
512 pp.sudogcache[n-1] = nil
513 pp.sudogcache = pp.sudogcache[:n-1]
514 if s.elem != nil {
515 throw("acquireSudog: found s.elem != nil in cache")
516 }
517 releasem(mp)
518 return s
519 }
520
521
522 func releaseSudog(s *sudog) {
523 if s.elem != nil {
524 throw("runtime: sudog with non-nil elem")
525 }
526 if s.isSelect {
527 throw("runtime: sudog with non-false isSelect")
528 }
529 if s.next != nil {
530 throw("runtime: sudog with non-nil next")
531 }
532 if s.prev != nil {
533 throw("runtime: sudog with non-nil prev")
534 }
535 if s.waitlink != nil {
536 throw("runtime: sudog with non-nil waitlink")
537 }
538 if s.c != nil {
539 throw("runtime: sudog with non-nil c")
540 }
541 gp := getg()
542 if gp.param != nil {
543 throw("runtime: releaseSudog with non-nil gp.param")
544 }
545 mp := acquirem()
546 pp := mp.p.ptr()
547 if len(pp.sudogcache) == cap(pp.sudogcache) {
548
549 var first, last *sudog
550 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
551 n := len(pp.sudogcache)
552 p := pp.sudogcache[n-1]
553 pp.sudogcache[n-1] = nil
554 pp.sudogcache = pp.sudogcache[:n-1]
555 if first == nil {
556 first = p
557 } else {
558 last.next = p
559 }
560 last = p
561 }
562 lock(&sched.sudoglock)
563 last.next = sched.sudogcache
564 sched.sudogcache = first
565 unlock(&sched.sudoglock)
566 }
567 pp.sudogcache = append(pp.sudogcache, s)
568 releasem(mp)
569 }
570
571
572 func badmcall(fn func(*g)) {
573 throw("runtime: mcall called on m->g0 stack")
574 }
575
576 func badmcall2(fn func(*g)) {
577 throw("runtime: mcall function returned")
578 }
579
580 func badreflectcall() {
581 panic(plainError("arg size to reflect.call more than 1GB"))
582 }
583
584
585
586 func badmorestackg0() {
587 if !crashStackImplemented {
588 writeErrStr("fatal: morestack on g0\n")
589 return
590 }
591
592 g := getg()
593 switchToCrashStack(func() {
594 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
595 g.m.traceback = 2
596 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
597 print("\n")
598
599 throw("morestack on g0")
600 })
601 }
602
603
604
605 func badmorestackgsignal() {
606 writeErrStr("fatal: morestack on gsignal\n")
607 }
608
609
610 func badctxt() {
611 throw("ctxt != 0")
612 }
613
614
615
616 var gcrash g
617
618 var crashingG atomic.Pointer[g]
619
620
621
622
623
624
625
626
627
628 func switchToCrashStack(fn func()) {
629 me := getg()
630 if crashingG.CompareAndSwapNoWB(nil, me) {
631 switchToCrashStack0(fn)
632 abort()
633 }
634 if crashingG.Load() == me {
635
636 writeErrStr("fatal: recursive switchToCrashStack\n")
637 abort()
638 }
639
640 usleep_no_g(100)
641 writeErrStr("fatal: concurrent switchToCrashStack\n")
642 abort()
643 }
644
645
646
647
648 const crashStackImplemented = GOOS != "windows"
649
650
651 func switchToCrashStack0(fn func())
652
653 func lockedOSThread() bool {
654 gp := getg()
655 return gp.lockedm != 0 && gp.m.lockedg != 0
656 }
657
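// allgs is the slice of all goroutines ever created; it only grows.
// Writers must hold allglock. Readers either take allglock (forEachG) or use
// the atomically-maintained allglen/allgptr mirrors below for lock-free
// access (forEachGRace), which may observe a slightly stale snapshot.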
658 var (
659
660
661
662
663
664
665 allglock mutex
666 allgs []*g
667
668
669
670
671
672
673
674
675
676
677
678
679
680 allglen uintptr
681 allgptr **g
682 )
683
684 func allgadd(gp *g) {
685 if readgstatus(gp) == _Gidle {
686 throw("allgadd: bad status Gidle")
687 }
688
689 lock(&allglock)
690 allgs = append(allgs, gp)
691 if &allgs[0] != allgptr {
692 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
693 }
694 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
695 unlock(&allglock)
696 }
697
698
699
700
701 func allGsSnapshot() []*g {
702 assertWorldStoppedOrLockHeld(&allglock)
703
704
705
706
707
708
709 return allgs[:len(allgs):len(allgs)]
710 }
711
712
713 func atomicAllG() (**g, uintptr) {
714 length := atomic.Loaduintptr(&allglen)
715 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
716 return ptr, length
717 }
718
719
720 func atomicAllGIndex(ptr **g, i uintptr) *g {
721 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
722 }
723
724
725
726
727 func forEachG(fn func(gp *g)) {
728 lock(&allglock)
729 for _, gp := range allgs {
730 fn(gp)
731 }
732 unlock(&allglock)
733 }
734
735
736
737
738
739 func forEachGRace(fn func(gp *g)) {
740 ptr, length := atomicAllG()
741 for i := uintptr(0); i < length; i++ {
742 gp := atomicAllGIndex(ptr, i)
743 fn(gp)
744 }
745 return
746 }
747
748 const (
749
750
751 _GoidCacheBatch = 16
752 )
753
754
755
756 func cpuinit(env string) {
757 switch GOOS {
758 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
759 cpu.DebugOptions = true
760 }
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasPOPCNT = cpu.X86.HasPOPCNT
768 x86HasSSE41 = cpu.X86.HasSSE41
769 x86HasFMA = cpu.X86.HasFMA
770
771 case "arm":
772 armHasVFPv4 = cpu.ARM.HasVFPv4
773
774 case "arm64":
775 arm64HasATOMICS = cpu.ARM64.HasATOMICS
776
777 case "loong64":
778 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
779 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
780 loong64HasLSX = cpu.Loong64.HasLSX
781 }
782 }
783
784
785
786
787 func getGodebugEarly() string {
788 const prefix = "GODEBUG="
789 var env string
790 switch GOOS {
791 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
792
793
794
795 n := int32(0)
796 for argv_index(argv, argc+1+n) != nil {
797 n++
798 }
799
800 for i := int32(0); i < n; i++ {
801 p := argv_index(argv, argc+1+i)
802 s := unsafe.String(p, findnull(p))
803
804 if stringslite.HasPrefix(s, prefix) {
805 env = gostring(p)[len(prefix):]
806 break
807 }
808 }
809 }
810 return env
811 }
812
813
814
815
816
817
818
819
820
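// schedinit performs the bulk of runtime initialization. The bootstrap
// sequence is: call osinit, call schedinit, make & queue the new G for
// runtime.main, call runtime·mstart; the new G calls runtime·main.
// By the time schedinit returns, GOMAXPROCS Ps have been created by
// procresize and the world is considered started.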
821 func schedinit() {
822 lockInit(&sched.lock, lockRankSched)
823 lockInit(&sched.sysmonlock, lockRankSysmon)
824 lockInit(&sched.deferlock, lockRankDefer)
825 lockInit(&sched.sudoglock, lockRankSudog)
826 lockInit(&deadlock, lockRankDeadlock)
827 lockInit(&paniclk, lockRankPanic)
828 lockInit(&allglock, lockRankAllg)
829 lockInit(&allpLock, lockRankAllp)
830 lockInit(&reflectOffs.lock, lockRankReflectOffs)
831 lockInit(&finlock, lockRankFin)
832 lockInit(&cpuprof.lock, lockRankCpuprof)
833 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
834 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
835 traceLockInit()
836
837
838
839 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
840
841 lockVerifyMSize()
842
843
844
845 gp := getg()
846 if raceenabled {
847 gp.racectx, raceprocctx0 = raceinit()
848 }
849
850 sched.maxmcount = 10000
851 crashFD.Store(^uintptr(0))
852
853
854 worldStopped()
855
856 ticks.init()
857 moduledataverify()
858 stackinit()
859 mallocinit()
860 godebug := getGodebugEarly()
861 cpuinit(godebug)
862 randinit()
863 alginit()
864 mcommoninit(gp.m, -1)
865 modulesinit()
866 typelinksinit()
867 itabsinit()
868 stkobjinit()
869
870 sigsave(&gp.m.sigmask)
871 initSigmask = gp.m.sigmask
872
873 goargs()
874 goenvs()
875 secure()
876 checkfds()
877 parsedebugvars()
878 gcinit()
879
880
881
882 gcrash.stack = stackalloc(16384)
883 gcrash.stackguard0 = gcrash.stack.lo + 1000
884 gcrash.stackguard1 = gcrash.stack.lo + 1000
885
886
887
888
889
890 if disableMemoryProfiling {
891 MemProfileRate = 0
892 }
893
894
895 mProfStackInit(gp.m)
896
897 lock(&sched.lock)
898 sched.lastpoll.Store(nanotime())
899 procs := ncpu
900 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
901 procs = n
902 }
903 if procresize(procs) != nil {
904 throw("unknown runnable goroutine during bootstrap")
905 }
906 unlock(&sched.lock)
907
908
909 worldStarted()
910
911 if buildVersion == "" {
912
913
914 buildVersion = "unknown"
915 }
916 if len(modinfo) == 1 {
917
918
919 modinfo = ""
920 }
921 }
922
923 func dumpgstatus(gp *g) {
924 thisg := getg()
925 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
926 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
927 }
928
929
930 func checkmcount() {
931 assertLockHeld(&sched.lock)
932
933
934
935
936
937
938
939
940
941 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
942 if count > sched.maxmcount {
943 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
944 throw("thread exhaustion")
945 }
946 }
947
948
949
950
951
952 func mReserveID() int64 {
953 assertLockHeld(&sched.lock)
954
955 if sched.mnext+1 < sched.mnext {
956 throw("runtime: thread ID overflow")
957 }
958 id := sched.mnext
959 sched.mnext++
960 checkmcount()
961 return id
962 }
963
964
965 func mcommoninit(mp *m, id int64) {
966 gp := getg()
967
968
969 if gp != gp.m.g0 {
970 callers(1, mp.createstack[:])
971 }
972
973 lock(&sched.lock)
974
975 if id >= 0 {
976 mp.id = id
977 } else {
978 mp.id = mReserveID()
979 }
980
981 mrandinit(mp)
982
983 mpreinit(mp)
984 if mp.gsignal != nil {
985 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
986 }
987
988
989
990 mp.alllink = allm
991
992
993
994 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
995 unlock(&sched.lock)
996
997
998 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
999 mp.cgoCallers = new(cgoCallers)
1000 }
1001 mProfStackInit(mp)
1002 }
1003
1004
1005
1006
1007
1008 func mProfStackInit(mp *m) {
1009 if debug.profstackdepth == 0 {
1010
1011
1012 return
1013 }
1014 mp.profStack = makeProfStackFP()
1015 mp.mLockProfile.stack = makeProfStackFP()
1016 }
1017
1018
1019
1020
1021 func makeProfStackFP() []uintptr {
1022
1023
1024
1025
1026
1027
1028 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1029 }
1030
1031
1032
1033 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1034
1035
1036 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1037
1038 func (mp *m) becomeSpinning() {
1039 mp.spinning = true
1040 sched.nmspinning.Add(1)
1041 sched.needspinning.Store(0)
1042 }
1043
1044 func (mp *m) hasCgoOnStack() bool {
1045 return mp.ncgo > 0 || mp.isextra
1046 }
1047
1048 const (
1049
1050
1051 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1052
1053
1054
1055 osHasLowResClockInt = goos.IsWindows
1056
1057
1058
1059 osHasLowResClock = osHasLowResClockInt > 0
1060 )
1061
1062
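// ready marks gp (which must be in _Gwaiting) runnable, puts it on the
// current P's run queue (possibly as the next goroutine to run), and wakes
// an idle P if one is needed to run it.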
1063 func ready(gp *g, traceskip int, next bool) {
1064 status := readgstatus(gp)
1065
1066
1067 mp := acquirem()
1068 if status&^_Gscan != _Gwaiting {
1069 dumpgstatus(gp)
1070 throw("bad g->status in ready")
1071 }
1072
1073
1074 trace := traceAcquire()
1075 casgstatus(gp, _Gwaiting, _Grunnable)
1076 if trace.ok() {
1077 trace.GoUnpark(gp, traceskip)
1078 traceRelease(trace)
1079 }
1080 runqput(mp.p.ptr(), gp, next)
1081 wakep()
1082 releasem(mp)
1083 }
1084
1085
1086
1087 const freezeStopWait = 0x7fffffff
1088
1089
1090
1091 var freezing atomic.Bool
1092
1093
1094
1095
1096 func freezetheworld() {
1097 freezing.Store(true)
1098 if debug.dontfreezetheworld > 0 {
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123 usleep(1000)
1124 return
1125 }
1126
1127
1128
1129
1130 for i := 0; i < 5; i++ {
1131
1132 sched.stopwait = freezeStopWait
1133 sched.gcwaiting.Store(true)
1134
1135 if !preemptall() {
1136 break
1137 }
1138 usleep(1000)
1139 }
1140
1141 usleep(1000)
1142 preemptall()
1143 usleep(1000)
1144 }
1145
1146
1147
1148
1149
1150 func readgstatus(gp *g) uint32 {
1151 return gp.atomicstatus.Load()
1152 }
1153
1154
1155
1156
1157
1158 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1159 success := false
1160
1161
1162 switch oldval {
1163 default:
1164 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1165 dumpgstatus(gp)
1166 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1167 case _Gscanrunnable,
1168 _Gscanwaiting,
1169 _Gscanrunning,
1170 _Gscansyscall,
1171 _Gscanpreempted:
1172 if newval == oldval&^_Gscan {
1173 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1174 }
1175 }
1176 if !success {
1177 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1178 dumpgstatus(gp)
1179 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1180 }
1181 releaseLockRankAndM(lockRankGscan)
1182 }
1183
1184
1185
1186 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1187 switch oldval {
1188 case _Grunnable,
1189 _Grunning,
1190 _Gwaiting,
1191 _Gsyscall:
1192 if newval == oldval|_Gscan {
1193 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1194 if r {
1195 acquireLockRankAndM(lockRankGscan)
1196 }
1197 return r
1198
1199 }
1200 }
1201 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1202 throw("castogscanstatus")
1203 panic("not reached")
1204 }
1205
1206
1207
1208 var casgstatusAlwaysTrack = false
1209
1210
1211
1212
1213
1214
1215
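// casgstatus transitions gp from oldval to newval, looping until the CAS
// succeeds (the status may be temporarily held in a _Gscan state by the GC).
// It must not be used for transitions involving _Gscan itself; use
// castogscanstatus and casfrom_Gscanstatus for those. It also feeds the
// scheduler-latency and mutex-wait tracking below.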
1216 func casgstatus(gp *g, oldval, newval uint32) {
1217 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1218 systemstack(func() {
1219
1220
1221 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1222 throw("casgstatus: bad incoming values")
1223 })
1224 }
1225
1226 lockWithRankMayAcquire(nil, lockRankGscan)
1227
1228
1229 const yieldDelay = 5 * 1000
1230 var nextYield int64
1231
1232
1233
1234 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1235 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1236 systemstack(func() {
1237
1238
1239 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1240 })
1241 }
1242 if i == 0 {
1243 nextYield = nanotime() + yieldDelay
1244 }
1245 if nanotime() < nextYield {
1246 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1247 procyield(1)
1248 }
1249 } else {
1250 osyield()
1251 nextYield = nanotime() + yieldDelay/2
1252 }
1253 }
1254
1255 if gp.syncGroup != nil {
1256 systemstack(func() {
1257 gp.syncGroup.changegstatus(gp, oldval, newval)
1258 })
1259 }
1260
1261 if oldval == _Grunning {
1262
1263 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1264 gp.tracking = true
1265 }
1266 gp.trackingSeq++
1267 }
1268 if !gp.tracking {
1269 return
1270 }
1271
1272
1273
1274
1275
1276
1277 switch oldval {
1278 case _Grunnable:
1279
1280
1281
1282 now := nanotime()
1283 gp.runnableTime += now - gp.trackingStamp
1284 gp.trackingStamp = 0
1285 case _Gwaiting:
1286 if !gp.waitreason.isMutexWait() {
1287
1288 break
1289 }
1290
1291
1292
1293
1294
1295 now := nanotime()
1296 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1297 gp.trackingStamp = 0
1298 }
1299 switch newval {
1300 case _Gwaiting:
1301 if !gp.waitreason.isMutexWait() {
1302
1303 break
1304 }
1305
1306 now := nanotime()
1307 gp.trackingStamp = now
1308 case _Grunnable:
1309
1310
1311 now := nanotime()
1312 gp.trackingStamp = now
1313 case _Grunning:
1314
1315
1316
1317 gp.tracking = false
1318 sched.timeToRun.record(gp.runnableTime)
1319 gp.runnableTime = 0
1320 }
1321 }
1322
1323
1324
1325
1326 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1327
1328 gp.waitreason = reason
1329 casgstatus(gp, old, _Gwaiting)
1330 }
1331
1332
1333
1334
1335
1336 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1337 if !reason.isWaitingForGC() {
1338 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1339 }
1340 casGToWaiting(gp, old, reason)
1341 }
1342
1343
1344
1345
1346
1347 func casGToPreemptScan(gp *g, old, new uint32) {
1348 if old != _Grunning || new != _Gscan|_Gpreempted {
1349 throw("bad g transition")
1350 }
1351 acquireLockRankAndM(lockRankGscan)
1352 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1353 }
1354
1355
1356
1357
1358
1359
1360 }
1361
1362
1363
1364
1365 func casGFromPreempted(gp *g, old, new uint32) bool {
1366 if old != _Gpreempted || new != _Gwaiting {
1367 throw("bad g transition")
1368 }
1369 gp.waitreason = waitReasonPreempted
1370 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1371 return false
1372 }
1373 if sg := gp.syncGroup; sg != nil {
1374 sg.changegstatus(gp, _Gpreempted, _Gwaiting)
1375 }
1376 return true
1377 }
1378
1379
1380 type stwReason uint8
1381
1382
1383
1384
1385 const (
1386 stwUnknown stwReason = iota
1387 stwGCMarkTerm
1388 stwGCSweepTerm
1389 stwWriteHeapDump
1390 stwGoroutineProfile
1391 stwGoroutineProfileCleanup
1392 stwAllGoroutinesStack
1393 stwReadMemStats
1394 stwAllThreadsSyscall
1395 stwGOMAXPROCS
1396 stwStartTrace
1397 stwStopTrace
1398 stwForTestCountPagesInUse
1399 stwForTestReadMetricsSlow
1400 stwForTestReadMemStatsSlow
1401 stwForTestPageCachePagesLeaked
1402 stwForTestResetDebugLog
1403 )
1404
1405 func (r stwReason) String() string {
1406 return stwReasonStrings[r]
1407 }
1408
1409 func (r stwReason) isGC() bool {
1410 return r == stwGCMarkTerm || r == stwGCSweepTerm
1411 }
1412
1413
1414
1415
1416 var stwReasonStrings = [...]string{
1417 stwUnknown: "unknown",
1418 stwGCMarkTerm: "GC mark termination",
1419 stwGCSweepTerm: "GC sweep termination",
1420 stwWriteHeapDump: "write heap dump",
1421 stwGoroutineProfile: "goroutine profile",
1422 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1423 stwAllGoroutinesStack: "all goroutines stack trace",
1424 stwReadMemStats: "read mem stats",
1425 stwAllThreadsSyscall: "AllThreadsSyscall",
1426 stwGOMAXPROCS: "GOMAXPROCS",
1427 stwStartTrace: "start trace",
1428 stwStopTrace: "stop trace",
1429 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1430 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1431 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1432 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1433 stwForTestResetDebugLog: "ResetDebugLog (test)",
1434 }
1435
1436
1437
1438 type worldStop struct {
1439 reason stwReason
1440 startedStopping int64
1441 finishedStopping int64
1442 stoppingCPUTime int64
1443 }
1444
1445
1446
1447
1448 var stopTheWorldContext worldStop
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
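// stopTheWorld stops all Ps and returns a worldStop describing the stop.
// The caller holds worldsema until the matching startTheWorld, so no other
// goroutine can stop the world in between; reason is recorded for traces
// and metrics.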
1467 func stopTheWorld(reason stwReason) worldStop {
1468 semacquire(&worldsema)
1469 gp := getg()
1470 gp.m.preemptoff = reason.String()
1471 systemstack(func() {
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1487 stopTheWorldContext = stopTheWorldWithSema(reason)
1488 casgstatus(gp, _Gwaiting, _Grunning)
1489 })
1490 return stopTheWorldContext
1491 }
1492
1493
1494
1495
1496 func startTheWorld(w worldStop) {
1497 systemstack(func() { startTheWorldWithSema(0, w) })
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514 mp := acquirem()
1515 mp.preemptoff = ""
1516 semrelease1(&worldsema, true, 0)
1517 releasem(mp)
1518 }
1519
1520
1521
1522
1523 func stopTheWorldGC(reason stwReason) worldStop {
1524 semacquire(&gcsema)
1525 return stopTheWorld(reason)
1526 }
1527
1528
1529
1530
1531 func startTheWorldGC(w worldStop) {
1532 startTheWorld(w)
1533 semrelease(&gcsema)
1534 }
1535
1536
1537 var worldsema uint32 = 1
1538
1539
1540
1541
1542
1543
1544
1545 var gcsema uint32 = 1
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
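// stopTheWorldWithSema is the core stop-the-world implementation. The caller
// must hold worldsema and must not hold any other runtime locks (enforced by
// the gp.m.locks check). It preempts running goroutines, claims Ps that are
// idle or in syscalls, then waits on stopnote until every P is in _Pgcstop,
// recording how long the stop took and the CPU time spent stopping.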
1577 func stopTheWorldWithSema(reason stwReason) worldStop {
1578 trace := traceAcquire()
1579 if trace.ok() {
1580 trace.STWStart(reason)
1581 traceRelease(trace)
1582 }
1583 gp := getg()
1584
1585
1586
1587 if gp.m.locks > 0 {
1588 throw("stopTheWorld: holding locks")
1589 }
1590
1591 lock(&sched.lock)
1592 start := nanotime()
1593 sched.stopwait = gomaxprocs
1594 sched.gcwaiting.Store(true)
1595 preemptall()
1596
1597 gp.m.p.ptr().status = _Pgcstop
1598 gp.m.p.ptr().gcStopTime = start
1599 sched.stopwait--
1600
1601 trace = traceAcquire()
1602 for _, pp := range allp {
1603 s := pp.status
1604 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1605 if trace.ok() {
1606 trace.ProcSteal(pp, false)
1607 }
1608 pp.syscalltick++
1609 pp.gcStopTime = nanotime()
1610 sched.stopwait--
1611 }
1612 }
1613 if trace.ok() {
1614 traceRelease(trace)
1615 }
1616
1617
1618 now := nanotime()
1619 for {
1620 pp, _ := pidleget(now)
1621 if pp == nil {
1622 break
1623 }
1624 pp.status = _Pgcstop
1625 pp.gcStopTime = nanotime()
1626 sched.stopwait--
1627 }
1628 wait := sched.stopwait > 0
1629 unlock(&sched.lock)
1630
1631
1632 if wait {
1633 for {
1634
1635 if notetsleep(&sched.stopnote, 100*1000) {
1636 noteclear(&sched.stopnote)
1637 break
1638 }
1639 preemptall()
1640 }
1641 }
1642
1643 finish := nanotime()
1644 startTime := finish - start
1645 if reason.isGC() {
1646 sched.stwStoppingTimeGC.record(startTime)
1647 } else {
1648 sched.stwStoppingTimeOther.record(startTime)
1649 }
1650
1651
1652
1653
1654
1655 stoppingCPUTime := int64(0)
1656 bad := ""
1657 if sched.stopwait != 0 {
1658 bad = "stopTheWorld: not stopped (stopwait != 0)"
1659 } else {
1660 for _, pp := range allp {
1661 if pp.status != _Pgcstop {
1662 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1663 }
1664 if pp.gcStopTime == 0 && bad == "" {
1665 bad = "stopTheWorld: broken CPU time accounting"
1666 }
1667 stoppingCPUTime += finish - pp.gcStopTime
1668 pp.gcStopTime = 0
1669 }
1670 }
1671 if freezing.Load() {
1672
1673
1674
1675
1676 lock(&deadlock)
1677 lock(&deadlock)
1678 }
1679 if bad != "" {
1680 throw(bad)
1681 }
1682
1683 worldStopped()
1684
1685 return worldStop{
1686 reason: reason,
1687 startedStopping: start,
1688 finishedStopping: finish,
1689 stoppingCPUTime: stoppingCPUTime,
1690 }
1691 }
1692
1693
1694
1695
1696
1697
1698
1699 func startTheWorldWithSema(now int64, w worldStop) int64 {
1700 assertWorldStopped()
1701
1702 mp := acquirem()
1703 if netpollinited() {
1704 list, delta := netpoll(0)
1705 injectglist(&list)
1706 netpollAdjustWaiters(delta)
1707 }
1708 lock(&sched.lock)
1709
1710 procs := gomaxprocs
1711 if newprocs != 0 {
1712 procs = newprocs
1713 newprocs = 0
1714 }
1715 p1 := procresize(procs)
1716 sched.gcwaiting.Store(false)
1717 if sched.sysmonwait.Load() {
1718 sched.sysmonwait.Store(false)
1719 notewakeup(&sched.sysmonnote)
1720 }
1721 unlock(&sched.lock)
1722
1723 worldStarted()
1724
1725 for p1 != nil {
1726 p := p1
1727 p1 = p1.link.ptr()
1728 if p.m != 0 {
1729 mp := p.m.ptr()
1730 p.m = 0
1731 if mp.nextp != 0 {
1732 throw("startTheWorld: inconsistent mp->nextp")
1733 }
1734 mp.nextp.set(p)
1735 notewakeup(&mp.park)
1736 } else {
1737
1738 newm(nil, p, -1)
1739 }
1740 }
1741
1742
1743 if now == 0 {
1744 now = nanotime()
1745 }
1746 totalTime := now - w.startedStopping
1747 if w.reason.isGC() {
1748 sched.stwTotalTimeGC.record(totalTime)
1749 } else {
1750 sched.stwTotalTimeOther.record(totalTime)
1751 }
1752 trace := traceAcquire()
1753 if trace.ok() {
1754 trace.STWDone()
1755 traceRelease(trace)
1756 }
1757
1758
1759
1760
1761 wakep()
1762
1763 releasem(mp)
1764
1765 return now
1766 }
1767
1768
1769
1770 func usesLibcall() bool {
1771 switch GOOS {
1772 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1773 return true
1774 case "openbsd":
1775 return GOARCH != "mips64"
1776 }
1777 return false
1778 }
1779
1780
1781
1782 func mStackIsSystemAllocated() bool {
1783 switch GOOS {
1784 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1785 return true
1786 case "openbsd":
1787 return GOARCH != "mips64"
1788 }
1789 return false
1790 }
1791
1792
1793
1794 func mstart()
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
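// mstart0 is the Go entry point for new Ms (mstart itself is assembly).
// For threads whose stack was allocated by the OS it synthesizes approximate
// stack bounds from the current stack pointer, then calls mstart1 to finish
// initialization and enter the scheduler. Control only comes back here when
// the M is shutting down, at which point mexit is called.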
1805 func mstart0() {
1806 gp := getg()
1807
1808 osStack := gp.stack.lo == 0
1809 if osStack {
1810
1811
1812
1813
1814
1815
1816
1817
1818 size := gp.stack.hi
1819 if size == 0 {
1820 size = 16384 * sys.StackGuardMultiplier
1821 }
1822 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1823 gp.stack.lo = gp.stack.hi - size + 1024
1824 }
1825
1826
1827 gp.stackguard0 = gp.stack.lo + stackGuard
1828
1829
1830 gp.stackguard1 = gp.stackguard0
1831 mstart1()
1832
1833
1834 if mStackIsSystemAllocated() {
1835
1836
1837
1838 osStack = true
1839 }
1840 mexit(osStack)
1841 }
1842
1843
1844
1845
1846
1847 func mstart1() {
1848 gp := getg()
1849
1850 if gp != gp.m.g0 {
1851 throw("bad runtime·mstart")
1852 }
1853
1854
1855
1856
1857
1858
1859
1860 gp.sched.g = guintptr(unsafe.Pointer(gp))
1861 gp.sched.pc = sys.GetCallerPC()
1862 gp.sched.sp = sys.GetCallerSP()
1863
1864 asminit()
1865 minit()
1866
1867
1868
1869 if gp.m == &m0 {
1870 mstartm0()
1871 }
1872
1873 if debug.dataindependenttiming == 1 {
1874 sys.EnableDIT()
1875 }
1876
1877 if fn := gp.m.mstartfn; fn != nil {
1878 fn()
1879 }
1880
1881 if gp.m != &m0 {
1882 acquirep(gp.m.nextp.ptr())
1883 gp.m.nextp = 0
1884 }
1885 schedule()
1886 }
1887
1888
1889
1890
1891
1892
1893
1894 func mstartm0() {
1895
1896
1897
1898 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1899 cgoHasExtraM = true
1900 newextram()
1901 }
1902 initsig(false)
1903 }
1904
1905
1906
1907
1908 func mPark() {
1909 gp := getg()
1910 notesleep(&gp.m.park)
1911 noteclear(&gp.m.park)
1912 }
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924 func mexit(osStack bool) {
1925 mp := getg().m
1926
1927 if mp == &m0 {
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939 handoffp(releasep())
1940 lock(&sched.lock)
1941 sched.nmfreed++
1942 checkdead()
1943 unlock(&sched.lock)
1944 mPark()
1945 throw("locked m0 woke up")
1946 }
1947
1948 sigblock(true)
1949 unminit()
1950
1951
1952 if mp.gsignal != nil {
1953 stackfree(mp.gsignal.stack)
1954
1955
1956
1957
1958 mp.gsignal = nil
1959 }
1960
1961
1962 lock(&sched.lock)
1963 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1964 if *pprev == mp {
1965 *pprev = mp.alllink
1966 goto found
1967 }
1968 }
1969 throw("m not found in allm")
1970 found:
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985 mp.freeWait.Store(freeMWait)
1986 mp.freelink = sched.freem
1987 sched.freem = mp
1988 unlock(&sched.lock)
1989
1990 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1991 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1992
1993
1994 handoffp(releasep())
1995
1996
1997
1998
1999
2000 lock(&sched.lock)
2001 sched.nmfreed++
2002 checkdead()
2003 unlock(&sched.lock)
2004
2005 if GOOS == "darwin" || GOOS == "ios" {
2006
2007
2008 if mp.signalPending.Load() != 0 {
2009 pendingPreemptSignals.Add(-1)
2010 }
2011 }
2012
2013
2014
2015 mdestroy(mp)
2016
2017 if osStack {
2018
2019 mp.freeWait.Store(freeMRef)
2020
2021
2022
2023 return
2024 }
2025
2026
2027
2028
2029
2030 exitThread(&mp.freeWait)
2031 }
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 func forEachP(reason waitReason, fn func(*p)) {
2044 systemstack(func() {
2045 gp := getg().m.curg
2046
2047
2048
2049
2050
2051
2052
2053
2054 casGToWaitingForGC(gp, _Grunning, reason)
2055 forEachPInternal(fn)
2056 casgstatus(gp, _Gwaiting, _Grunning)
2057 })
2058 }
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069 func forEachPInternal(fn func(*p)) {
2070 mp := acquirem()
2071 pp := getg().m.p.ptr()
2072
2073 lock(&sched.lock)
2074 if sched.safePointWait != 0 {
2075 throw("forEachP: sched.safePointWait != 0")
2076 }
2077 sched.safePointWait = gomaxprocs - 1
2078 sched.safePointFn = fn
2079
2080
2081 for _, p2 := range allp {
2082 if p2 != pp {
2083 atomic.Store(&p2.runSafePointFn, 1)
2084 }
2085 }
2086 preemptall()
2087
2088
2089
2090
2091
2092
2093
2094 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2095 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2096 fn(p)
2097 sched.safePointWait--
2098 }
2099 }
2100
2101 wait := sched.safePointWait > 0
2102 unlock(&sched.lock)
2103
2104
2105 fn(pp)
2106
2107
2108
2109 for _, p2 := range allp {
2110 s := p2.status
2111
2112
2113
2114 trace := traceAcquire()
2115 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2116 if trace.ok() {
2117
2118 trace.ProcSteal(p2, false)
2119 traceRelease(trace)
2120 }
2121 p2.syscalltick++
2122 handoffp(p2)
2123 } else if trace.ok() {
2124 traceRelease(trace)
2125 }
2126 }
2127
2128
2129 if wait {
2130 for {
2131
2132
2133
2134
2135 if notetsleep(&sched.safePointNote, 100*1000) {
2136 noteclear(&sched.safePointNote)
2137 break
2138 }
2139 preemptall()
2140 }
2141 }
2142 if sched.safePointWait != 0 {
2143 throw("forEachP: not done")
2144 }
2145 for _, p2 := range allp {
2146 if p2.runSafePointFn != 0 {
2147 throw("forEachP: P did not run fn")
2148 }
2149 }
2150
2151 lock(&sched.lock)
2152 sched.safePointFn = nil
2153 unlock(&sched.lock)
2154 releasem(mp)
2155 }
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168 func runSafePointFn() {
2169 p := getg().m.p.ptr()
2170
2171
2172
2173 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2174 return
2175 }
2176 sched.safePointFn(p)
2177 lock(&sched.lock)
2178 sched.safePointWait--
2179 if sched.safePointWait == 0 {
2180 notewakeup(&sched.safePointNote)
2181 }
2182 unlock(&sched.lock)
2183 }
2184
2185
2186
2187
2188 var cgoThreadStart unsafe.Pointer
2189
2190 type cgothreadstart struct {
2191 g guintptr
2192 tls *uint64
2193 fn unsafe.Pointer
2194 }
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205 func allocm(pp *p, fn func(), id int64) *m {
2206 allocmLock.rlock()
2207
2208
2209
2210
2211 acquirem()
2212
2213 gp := getg()
2214 if gp.m.p == 0 {
2215 acquirep(pp)
2216 }
2217
2218
2219
2220 if sched.freem != nil {
2221 lock(&sched.lock)
2222 var newList *m
2223 for freem := sched.freem; freem != nil; {
2224
2225 wait := freem.freeWait.Load()
2226 if wait == freeMWait {
2227 next := freem.freelink
2228 freem.freelink = newList
2229 newList = freem
2230 freem = next
2231 continue
2232 }
2233
2234
2235
2236 if traceEnabled() || traceShuttingDown() {
2237 traceThreadDestroy(freem)
2238 }
2239
2240
2241
2242 if wait == freeMStack {
2243
2244
2245
2246 systemstack(func() {
2247 stackfree(freem.g0.stack)
2248 })
2249 }
2250 freem = freem.freelink
2251 }
2252 sched.freem = newList
2253 unlock(&sched.lock)
2254 }
2255
2256 mp := new(m)
2257 mp.mstartfn = fn
2258 mcommoninit(mp, id)
2259
2260
2261
2262 if iscgo || mStackIsSystemAllocated() {
2263 mp.g0 = malg(-1)
2264 } else {
2265 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2266 }
2267 mp.g0.m = mp
2268
2269 if pp == gp.m.p.ptr() {
2270 releasep()
2271 }
2272
2273 releasem(gp.m)
2274 allocmLock.runlock()
2275 return mp
2276 }
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
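// needm is called on a thread the Go runtime does not own (a cgo or Windows
// callback thread, with no current m). It blocks signals, takes an m from
// the extra-M list, sets up TLS and the g0 stack bounds for this thread, and
// moves the m's curg from _Gdead to _Gsyscall so the callback can run Go
// code. signal reports whether this is for a signal handler context. dropm
// undoes the association when the callback returns.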
2317 func needm(signal bool) {
2318 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2319
2320
2321
2322
2323
2324
2325 writeErrStr("fatal error: cgo callback before cgo call\n")
2326 exit(1)
2327 }
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337 var sigmask sigset
2338 sigsave(&sigmask)
2339 sigblock(false)
2340
2341
2342
2343
2344 mp, last := getExtraM()
2345
2346
2347
2348
2349
2350
2351
2352
2353 mp.needextram = last
2354
2355
2356 mp.sigmask = sigmask
2357
2358
2359
2360 osSetupTLS(mp)
2361
2362
2363
2364 setg(mp.g0)
2365 sp := sys.GetCallerSP()
2366 callbackUpdateSystemStack(mp, sp, signal)
2367
2368
2369
2370
2371 mp.isExtraInC = false
2372
2373
2374 asminit()
2375 minit()
2376
2377
2378
2379
2380
2381
2382 var trace traceLocker
2383 if !signal {
2384 trace = traceAcquire()
2385 }
2386
2387
2388 casgstatus(mp.curg, _Gdead, _Gsyscall)
2389 sched.ngsys.Add(-1)
2390
2391 if !signal {
2392 if trace.ok() {
2393 trace.GoCreateSyscall(mp.curg)
2394 traceRelease(trace)
2395 }
2396 }
2397 mp.isExtraInSig = signal
2398 }
2399
2400
2401
2402
2403 func needAndBindM() {
2404 needm(false)
2405
2406 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2407 cgoBindM()
2408 }
2409 }
2410
2411
2412
2413
2414 func newextram() {
2415 c := extraMWaiters.Swap(0)
2416 if c > 0 {
2417 for i := uint32(0); i < c; i++ {
2418 oneNewExtraM()
2419 }
2420 } else if extraMLength.Load() == 0 {
2421
2422 oneNewExtraM()
2423 }
2424 }
2425
2426
2427 func oneNewExtraM() {
2428
2429
2430
2431
2432
2433 mp := allocm(nil, nil, -1)
2434 gp := malg(4096)
2435 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2436 gp.sched.sp = gp.stack.hi
2437 gp.sched.sp -= 4 * goarch.PtrSize
2438 gp.sched.lr = 0
2439 gp.sched.g = guintptr(unsafe.Pointer(gp))
2440 gp.syscallpc = gp.sched.pc
2441 gp.syscallsp = gp.sched.sp
2442 gp.stktopsp = gp.sched.sp
2443
2444
2445
2446
2447 casgstatus(gp, _Gidle, _Gdead)
2448 gp.m = mp
2449 mp.curg = gp
2450 mp.isextra = true
2451
2452 mp.isExtraInC = true
2453 mp.lockedInt++
2454 mp.lockedg.set(gp)
2455 gp.lockedm.set(mp)
2456 gp.goid = sched.goidgen.Add(1)
2457 if raceenabled {
2458 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2459 }
2460
2461 allgadd(gp)
2462
2463
2464
2465
2466
2467 sched.ngsys.Add(1)
2468
2469
2470 addExtraM(mp)
2471 }
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
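// dropm is the inverse of needm: after a cgo callback finishes, it puts the
// current extra m back on the extra-M list, returns curg to _Gdead, clears
// the borrowed g0 stack bounds, and restores the thread's original signal
// mask. The thread then runs with no g until the next callback.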
2506 func dropm() {
2507
2508
2509
2510 mp := getg().m
2511
2512
2513
2514
2515
2516 var trace traceLocker
2517 if !mp.isExtraInSig {
2518 trace = traceAcquire()
2519 }
2520
2521
2522 casgstatus(mp.curg, _Gsyscall, _Gdead)
2523 mp.curg.preemptStop = false
2524 sched.ngsys.Add(1)
2525
2526 if !mp.isExtraInSig {
2527 if trace.ok() {
2528 trace.GoDestroySyscall()
2529 traceRelease(trace)
2530 }
2531 }
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546 mp.syscalltick--
2547
2548
2549
2550 mp.curg.trace.reset()
2551
2552
2553
2554
2555 if traceEnabled() || traceShuttingDown() {
2556
2557
2558
2559
2560
2561
2562
2563 lock(&sched.lock)
2564 traceThreadDestroy(mp)
2565 unlock(&sched.lock)
2566 }
2567 mp.isExtraInSig = false
2568
2569
2570
2571
2572
2573 sigmask := mp.sigmask
2574 sigblock(false)
2575 unminit()
2576
2577 setg(nil)
2578
2579
2580
2581 g0 := mp.g0
2582 g0.stack.hi = 0
2583 g0.stack.lo = 0
2584 g0.stackguard0 = 0
2585 g0.stackguard1 = 0
2586 mp.g0StackAccurate = false
2587
2588 putExtraM(mp)
2589
2590 msigrestore(sigmask)
2591 }
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613 func cgoBindM() {
2614 if GOOS == "windows" || GOOS == "plan9" {
2615 fatal("bindm in unexpected GOOS")
2616 }
2617 g := getg()
2618 if g.m.g0 != g {
2619 fatal("the current g is not g0")
2620 }
2621 if _cgo_bindm != nil {
2622 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2623 }
2624 }
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637 func getm() uintptr {
2638 return uintptr(unsafe.Pointer(getg().m))
2639 }
2640
2641 var (
2642
2643
2644
2645
2646
2647
2648 extraM atomic.Uintptr
2649
2650 extraMLength atomic.Uint32
2651
2652 extraMWaiters atomic.Uint32
2653
2654
2655 extraMInUse atomic.Uint32
2656 )
2657
2658
2659
2660
2661
2662
2663
2664
2665 func lockextra(nilokay bool) *m {
2666 const locked = 1
2667
2668 incr := false
2669 for {
2670 old := extraM.Load()
2671 if old == locked {
2672 osyield_no_g()
2673 continue
2674 }
2675 if old == 0 && !nilokay {
2676 if !incr {
2677
2678
2679
2680 extraMWaiters.Add(1)
2681 incr = true
2682 }
2683 usleep_no_g(1)
2684 continue
2685 }
2686 if extraM.CompareAndSwap(old, locked) {
2687 return (*m)(unsafe.Pointer(old))
2688 }
2689 osyield_no_g()
2690 continue
2691 }
2692 }
2693
2694
2695 func unlockextra(mp *m, delta int32) {
2696 extraMLength.Add(delta)
2697 extraM.Store(uintptr(unsafe.Pointer(mp)))
2698 }
2699
2700
2701
2702
2703
2704
2705
2706
2707 func getExtraM() (mp *m, last bool) {
2708 mp = lockextra(false)
2709 extraMInUse.Add(1)
2710 unlockextra(mp.schedlink.ptr(), -1)
2711 return mp, mp.schedlink.ptr() == nil
2712 }
2713
2714
2715
2716
2717
2718 func putExtraM(mp *m) {
2719 extraMInUse.Add(-1)
2720 addExtraM(mp)
2721 }
2722
2723
2724
2725
2726 func addExtraM(mp *m) {
2727 mnext := lockextra(true)
2728 mp.schedlink.set(mnext)
2729 unlockextra(mp, 1)
2730 }
2731
2732 var (
2733
2734
2735
2736 allocmLock rwmutex
2737
2738
2739
2740
2741 execLock rwmutex
2742 )
2743
2744
2745
2746 const (
2747 failthreadcreate = "runtime: failed to create new OS thread\n"
2748 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2749 )
2750
2751
2752
2753
2754 var newmHandoff struct {
2755 lock mutex
2756
2757
2758
2759 newm muintptr
2760
2761
2762
2763 waiting bool
2764 wake note
2765
2766
2767
2768
2769 haveTemplateThread uint32
2770 }
2771
2772
2773
2774
2775
2776
2777
2778
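// newm creates a new m that will start an OS thread running fn (or just the
// scheduler if fn is nil), pre-assigned to pp. If the calling thread is
// locked to a goroutine or owned by cgo, the request is handed to the
// template thread via newmHandoff so the new thread starts from a known-good
// state.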
2779 func newm(fn func(), pp *p, id int64) {
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790 acquirem()
2791
2792 mp := allocm(pp, fn, id)
2793 mp.nextp.set(pp)
2794 mp.sigmask = initSigmask
2795 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807 lock(&newmHandoff.lock)
2808 if newmHandoff.haveTemplateThread == 0 {
2809 throw("on a locked thread with no template thread")
2810 }
2811 mp.schedlink = newmHandoff.newm
2812 newmHandoff.newm.set(mp)
2813 if newmHandoff.waiting {
2814 newmHandoff.waiting = false
2815 notewakeup(&newmHandoff.wake)
2816 }
2817 unlock(&newmHandoff.lock)
2818
2819
2820
2821 releasem(getg().m)
2822 return
2823 }
2824 newm1(mp)
2825 releasem(getg().m)
2826 }
2827
2828 func newm1(mp *m) {
2829 if iscgo {
2830 var ts cgothreadstart
2831 if _cgo_thread_start == nil {
2832 throw("_cgo_thread_start missing")
2833 }
2834 ts.g.set(mp.g0)
2835 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2836 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2837 if msanenabled {
2838 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2839 }
2840 if asanenabled {
2841 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2842 }
2843 execLock.rlock()
2844 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2845 execLock.runlock()
2846 return
2847 }
2848 execLock.rlock()
2849 newosproc(mp)
2850 execLock.runlock()
2851 }
2852
2853
2854
2855
2856
2857 func startTemplateThread() {
2858 if GOARCH == "wasm" {
2859 return
2860 }
2861
2862
2863
2864 mp := acquirem()
2865 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2866 releasem(mp)
2867 return
2868 }
2869 newm(templateThread, nil, -1)
2870 releasem(mp)
2871 }
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885 func templateThread() {
2886 lock(&sched.lock)
2887 sched.nmsys++
2888 checkdead()
2889 unlock(&sched.lock)
2890
2891 for {
2892 lock(&newmHandoff.lock)
2893 for newmHandoff.newm != 0 {
2894 newm := newmHandoff.newm.ptr()
2895 newmHandoff.newm = 0
2896 unlock(&newmHandoff.lock)
2897 for newm != nil {
2898 next := newm.schedlink.ptr()
2899 newm.schedlink = 0
2900 newm1(newm)
2901 newm = next
2902 }
2903 lock(&newmHandoff.lock)
2904 }
2905 newmHandoff.waiting = true
2906 noteclear(&newmHandoff.wake)
2907 unlock(&newmHandoff.lock)
2908 notesleep(&newmHandoff.wake)
2909 }
2910 }
2911
2912
2913
2914 func stopm() {
2915 gp := getg()
2916
2917 if gp.m.locks != 0 {
2918 throw("stopm holding locks")
2919 }
2920 if gp.m.p != 0 {
2921 throw("stopm holding p")
2922 }
2923 if gp.m.spinning {
2924 throw("stopm spinning")
2925 }
2926
2927 lock(&sched.lock)
2928 mput(gp.m)
2929 unlock(&sched.lock)
2930 mPark()
2931 acquirep(gp.m.nextp.ptr())
2932 gp.m.nextp = 0
2933 }
2934
2935 func mspinning() {
2936
2937 getg().m.spinning = true
2938 }
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
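// startm schedules some m to run pp (creating a new m if none is idle).
// If spinning is true, the caller has incremented nmspinning and startm
// transfers that count to the started m, which must eventually decrement it
// or hand it off. If lockheld is true, sched.lock is already held.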
2957 func startm(pp *p, spinning, lockheld bool) {
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974 mp := acquirem()
2975 if !lockheld {
2976 lock(&sched.lock)
2977 }
2978 if pp == nil {
2979 if spinning {
2980
2981
2982
2983 throw("startm: P required for spinning=true")
2984 }
2985 pp, _ = pidleget(0)
2986 if pp == nil {
2987 if !lockheld {
2988 unlock(&sched.lock)
2989 }
2990 releasem(mp)
2991 return
2992 }
2993 }
2994 nmp := mget()
2995 if nmp == nil {
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010 id := mReserveID()
3011 unlock(&sched.lock)
3012
3013 var fn func()
3014 if spinning {
3015
3016 fn = mspinning
3017 }
3018 newm(fn, pp, id)
3019
3020 if lockheld {
3021 lock(&sched.lock)
3022 }
3023
3024
3025 releasem(mp)
3026 return
3027 }
3028 if !lockheld {
3029 unlock(&sched.lock)
3030 }
3031 if nmp.spinning {
3032 throw("startm: m is spinning")
3033 }
3034 if nmp.nextp != 0 {
3035 throw("startm: m has p")
3036 }
3037 if spinning && !runqempty(pp) {
3038 throw("startm: p has runnable gs")
3039 }
3040
3041 nmp.spinning = spinning
3042 nmp.nextp.set(pp)
3043 notewakeup(&nmp.park)
3044
3045
3046 releasem(mp)
3047 }
3048
3049
3050
3051
3052
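// handoffp hands off ownership of a P from a syscall or locked m. It starts
// an m to run the P whenever the P plausibly has work (local or global run
// queue entries, trace or GC mark work, a pending safe-point function, or an
// upcoming timer); otherwise the P is put on the idle list, waking the
// netpoller if a timer is due.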
3053 func handoffp(pp *p) {
3054
3055
3056
3057
3058 if !runqempty(pp) || sched.runqsize != 0 {
3059 startm(pp, false, false)
3060 return
3061 }
3062
3063 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3064 startm(pp, false, false)
3065 return
3066 }
3067
3068 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3069 startm(pp, false, false)
3070 return
3071 }
3072
3073
3074 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3075 sched.needspinning.Store(0)
3076 startm(pp, true, false)
3077 return
3078 }
3079 lock(&sched.lock)
3080 if sched.gcwaiting.Load() {
3081 pp.status = _Pgcstop
3082 pp.gcStopTime = nanotime()
3083 sched.stopwait--
3084 if sched.stopwait == 0 {
3085 notewakeup(&sched.stopnote)
3086 }
3087 unlock(&sched.lock)
3088 return
3089 }
3090 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3091 sched.safePointFn(pp)
3092 sched.safePointWait--
3093 if sched.safePointWait == 0 {
3094 notewakeup(&sched.safePointNote)
3095 }
3096 }
3097 if sched.runqsize != 0 {
3098 unlock(&sched.lock)
3099 startm(pp, false, false)
3100 return
3101 }
3102
3103
3104 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3105 unlock(&sched.lock)
3106 startm(pp, false, false)
3107 return
3108 }
3109
3110
3111
3112 when := pp.timers.wakeTime()
3113 pidleput(pp, 0)
3114 unlock(&sched.lock)
3115
3116 if when != 0 {
3117 wakeNetPoller(when)
3118 }
3119 }
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
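// wakep tries to add one more spinning M running a P, and is called when a
// goroutine is made runnable. The nmspinning check-and-CAS ensures at most
// one additional spinning M is started at a time, which bounds wasted spins
// when there is little work.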
3134 func wakep() {
3135
3136
3137 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3138 return
3139 }
3140
3141
3142
3143
3144
3145
3146 mp := acquirem()
3147
3148 var pp *p
3149 lock(&sched.lock)
3150 pp, _ = pidlegetSpinning(0)
3151 if pp == nil {
3152 if sched.nmspinning.Add(-1) < 0 {
3153 throw("wakep: negative nmspinning")
3154 }
3155 unlock(&sched.lock)
3156 releasem(mp)
3157 return
3158 }
3159
3160
3161
3162
3163 unlock(&sched.lock)
3164
3165 startm(pp, true, false)
3166
3167 releasem(mp)
3168 }
3169
3170
3171
3172 func stoplockedm() {
3173 gp := getg()
3174
3175 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3176 throw("stoplockedm: inconsistent locking")
3177 }
3178 if gp.m.p != 0 {
3179
3180 pp := releasep()
3181 handoffp(pp)
3182 }
3183 incidlelocked(1)
3184
3185 mPark()
3186 status := readgstatus(gp.m.lockedg.ptr())
3187 if status&^_Gscan != _Grunnable {
3188 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3189 dumpgstatus(gp.m.lockedg.ptr())
3190 throw("stoplockedm: not runnable")
3191 }
3192 acquirep(gp.m.nextp.ptr())
3193 gp.m.nextp = 0
3194 }
3195
3196
3197
3198
3199
3200 func startlockedm(gp *g) {
3201 mp := gp.lockedm.ptr()
3202 if mp == getg().m {
3203 throw("startlockedm: locked to me")
3204 }
3205 if mp.nextp != 0 {
3206 throw("startlockedm: m has p")
3207 }
3208
3209 incidlelocked(-1)
3210 pp := releasep()
3211 mp.nextp.set(pp)
3212 notewakeup(&mp.park)
3213 stopm()
3214 }
3215
3216
3217
3218 func gcstopm() {
3219 gp := getg()
3220
3221 if !sched.gcwaiting.Load() {
3222 throw("gcstopm: not waiting for gc")
3223 }
3224 if gp.m.spinning {
3225 gp.m.spinning = false
3226
3227
3228 if sched.nmspinning.Add(-1) < 0 {
3229 throw("gcstopm: negative nmspinning")
3230 }
3231 }
3232 pp := releasep()
3233 lock(&sched.lock)
3234 pp.status = _Pgcstop
3235 pp.gcStopTime = nanotime()
3236 sched.stopwait--
3237 if sched.stopwait == 0 {
3238 notewakeup(&sched.stopnote)
3239 }
3240 unlock(&sched.lock)
3241 stopm()
3242 }
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253 func execute(gp *g, inheritTime bool) {
3254 mp := getg().m
3255
3256 if goroutineProfile.active {
3257
3258
3259
3260 tryRecordGoroutineProfile(gp, nil, osyield)
3261 }
3262
3263
3264
3265 mp.curg = gp
3266 gp.m = mp
3267 casgstatus(gp, _Grunnable, _Grunning)
3268 gp.waitsince = 0
3269 gp.preempt = false
3270 gp.stackguard0 = gp.stack.lo + stackGuard
3271 if !inheritTime {
3272 mp.p.ptr().schedtick++
3273 }
3274
3275
3276 hz := sched.profilehz
3277 if mp.profilehz != hz {
3278 setThreadCPUProfiler(hz)
3279 }
3280
3281 trace := traceAcquire()
3282 if trace.ok() {
3283 trace.GoStart()
3284 traceRelease(trace)
3285 }
3286
3287 gogo(&gp.sched)
3288 }
3289
3290
3291
3292
3293
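// findRunnable blocks until it finds a goroutine to execute. inheritTime
// reports whether the goroutine inherits the remainder of the current time
// slice; tryWakeP reports that the returned goroutine is special (trace
// reader, GC worker) so the caller should consider waking another P. The
// search order is roughly: trace reader, GC mark worker, global run queue
// (occasionally, for fairness), local run queue, global run queue, netpoll,
// then work stealing; if nothing turns up the P is released and the m parks.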
3294 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3295 mp := getg().m
3296
3297
3298
3299
3300
3301 top:
3302 pp := mp.p.ptr()
3303 if sched.gcwaiting.Load() {
3304 gcstopm()
3305 goto top
3306 }
3307 if pp.runSafePointFn != 0 {
3308 runSafePointFn()
3309 }
3310
3311
3312
3313
3314
3315 now, pollUntil, _ := pp.timers.check(0)
3316
3317
3318 if traceEnabled() || traceShuttingDown() {
3319 gp := traceReader()
3320 if gp != nil {
3321 trace := traceAcquire()
3322 casgstatus(gp, _Gwaiting, _Grunnable)
3323 if trace.ok() {
3324 trace.GoUnpark(gp, 0)
3325 traceRelease(trace)
3326 }
3327 return gp, false, true
3328 }
3329 }
3330
3331
3332 if gcBlackenEnabled != 0 {
3333 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3334 if gp != nil {
3335 return gp, false, true
3336 }
3337 now = tnow
3338 }
3339
3340
3341
3342
3343 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3344 lock(&sched.lock)
3345 gp := globrunqget(pp, 1)
3346 unlock(&sched.lock)
3347 if gp != nil {
3348 return gp, false, false
3349 }
3350 }
3351
3352
3353 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3354 if gp := wakefing(); gp != nil {
3355 ready(gp, 0, true)
3356 }
3357 }
3358 if *cgo_yield != nil {
3359 asmcgocall(*cgo_yield, nil)
3360 }
3361
3362
3363 if gp, inheritTime := runqget(pp); gp != nil {
3364 return gp, inheritTime, false
3365 }
3366
3367
3368 if sched.runqsize != 0 {
3369 lock(&sched.lock)
3370 gp := globrunqget(pp, 0)
3371 unlock(&sched.lock)
3372 if gp != nil {
3373 return gp, false, false
3374 }
3375 }
3376
3377
3378
3379
3380
3381
3382
3383
3384 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3385 if list, delta := netpoll(0); !list.empty() {
3386 gp := list.pop()
3387 injectglist(&list)
3388 netpollAdjustWaiters(delta)
3389 trace := traceAcquire()
3390 casgstatus(gp, _Gwaiting, _Grunnable)
3391 if trace.ok() {
3392 trace.GoUnpark(gp, 0)
3393 traceRelease(trace)
3394 }
3395 return gp, false, false
3396 }
3397 }
3398
3399
3400
3401
3402
3403
3404 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3405 if !mp.spinning {
3406 mp.becomeSpinning()
3407 }
3408
3409 gp, inheritTime, tnow, w, newWork := stealWork(now)
3410 if gp != nil {
3411
3412 return gp, inheritTime, false
3413 }
3414 if newWork {
3415
3416
3417 goto top
3418 }
3419
3420 now = tnow
3421 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3422
3423 pollUntil = w
3424 }
3425 }
3426
3427
3428
3429
3430
3431 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3432 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3433 if node != nil {
3434 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3435 gp := node.gp.ptr()
3436
3437 trace := traceAcquire()
3438 casgstatus(gp, _Gwaiting, _Grunnable)
3439 if trace.ok() {
3440 trace.GoUnpark(gp, 0)
3441 traceRelease(trace)
3442 }
3443 return gp, false, false
3444 }
3445 gcController.removeIdleMarkWorker()
3446 }
3447
3448
3449
3450
3451
3452 gp, otherReady := beforeIdle(now, pollUntil)
3453 if gp != nil {
3454 trace := traceAcquire()
3455 casgstatus(gp, _Gwaiting, _Grunnable)
3456 if trace.ok() {
3457 trace.GoUnpark(gp, 0)
3458 traceRelease(trace)
3459 }
3460 return gp, false, false
3461 }
3462 if otherReady {
3463 goto top
3464 }
3465
3466
3467
3468
3469
3470 allpSnapshot := allp
3471
3472
3473 idlepMaskSnapshot := idlepMask
3474 timerpMaskSnapshot := timerpMask
3475
3476
3477 lock(&sched.lock)
3478 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3479 unlock(&sched.lock)
3480 goto top
3481 }
3482 if sched.runqsize != 0 {
3483 gp := globrunqget(pp, 0)
3484 unlock(&sched.lock)
3485 return gp, false, false
3486 }
3487 if !mp.spinning && sched.needspinning.Load() == 1 {
3488
3489 mp.becomeSpinning()
3490 unlock(&sched.lock)
3491 goto top
3492 }
3493 if releasep() != pp {
3494 throw("findrunnable: wrong p")
3495 }
3496 now = pidleput(pp, now)
3497 unlock(&sched.lock)
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535 wasSpinning := mp.spinning
3536 if mp.spinning {
3537 mp.spinning = false
3538 if sched.nmspinning.Add(-1) < 0 {
3539 throw("findrunnable: negative nmspinning")
3540 }
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553 lock(&sched.lock)
3554 if sched.runqsize != 0 {
3555 pp, _ := pidlegetSpinning(0)
3556 if pp != nil {
3557 gp := globrunqget(pp, 0)
3558 if gp == nil {
3559 throw("global runq empty with non-zero runqsize")
3560 }
3561 unlock(&sched.lock)
3562 acquirep(pp)
3563 mp.becomeSpinning()
3564 return gp, false, false
3565 }
3566 }
3567 unlock(&sched.lock)
3568
3569 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3570 if pp != nil {
3571 acquirep(pp)
3572 mp.becomeSpinning()
3573 goto top
3574 }
3575
3576
3577 pp, gp := checkIdleGCNoP()
3578 if pp != nil {
3579 acquirep(pp)
3580 mp.becomeSpinning()
3581
3582
3583 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3584 trace := traceAcquire()
3585 casgstatus(gp, _Gwaiting, _Grunnable)
3586 if trace.ok() {
3587 trace.GoUnpark(gp, 0)
3588 traceRelease(trace)
3589 }
3590 return gp, false, false
3591 }
3592
3593
3594
3595
3596
3597
3598
3599 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3600 }
3601
3602
3603 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3604 sched.pollUntil.Store(pollUntil)
3605 if mp.p != 0 {
3606 throw("findrunnable: netpoll with p")
3607 }
3608 if mp.spinning {
3609 throw("findrunnable: netpoll with spinning")
3610 }
3611 delay := int64(-1)
3612 if pollUntil != 0 {
3613 if now == 0 {
3614 now = nanotime()
3615 }
3616 delay = pollUntil - now
3617 if delay < 0 {
3618 delay = 0
3619 }
3620 }
3621 if faketime != 0 {
3622
3623 delay = 0
3624 }
3625 list, delta := netpoll(delay)
3626
3627 now = nanotime()
3628 sched.pollUntil.Store(0)
3629 sched.lastpoll.Store(now)
3630 if faketime != 0 && list.empty() {
3631
3632
3633 stopm()
3634 goto top
3635 }
3636 lock(&sched.lock)
3637 pp, _ := pidleget(now)
3638 unlock(&sched.lock)
3639 if pp == nil {
3640 injectglist(&list)
3641 netpollAdjustWaiters(delta)
3642 } else {
3643 acquirep(pp)
3644 if !list.empty() {
3645 gp := list.pop()
3646 injectglist(&list)
3647 netpollAdjustWaiters(delta)
3648 trace := traceAcquire()
3649 casgstatus(gp, _Gwaiting, _Grunnable)
3650 if trace.ok() {
3651 trace.GoUnpark(gp, 0)
3652 traceRelease(trace)
3653 }
3654 return gp, false, false
3655 }
3656 if wasSpinning {
3657 mp.becomeSpinning()
3658 }
3659 goto top
3660 }
3661 } else if pollUntil != 0 && netpollinited() {
3662 pollerPollUntil := sched.pollUntil.Load()
3663 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3664 netpollBreak()
3665 }
3666 }
3667 stopm()
3668 goto top
3669 }
3670
3671
3672
3673
3674
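// pollWork reports whether there is non-background work this P could
// be doing: a non-empty global or local run queue, or ready network
// poller work. It is a lightweight check used by background loops.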
3675 func pollWork() bool {
3676 if sched.runqsize != 0 {
3677 return true
3678 }
3679 p := getg().m.p.ptr()
3680 if !runqempty(p) {
3681 return true
3682 }
3683 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3684 if list, delta := netpoll(0); !list.empty() {
3685 injectglist(&list)
3686 netpollAdjustWaiters(delta)
3687 return true
3688 }
3689 }
3690 return false
3691 }
3692
3693
3694
3695
3696
3697
3698
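// stealWork attempts to steal a runnable goroutine or expired timer
// from any other P. It returns either a goroutine (with inheritTime),
// or updated now and pollUntil values; newWork reports that new work
// may have been readied that was not directly stolen.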
3699 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3700 pp := getg().m.p.ptr()
3701
3702 ranTimer := false
3703
3704 const stealTries = 4
3705 for i := 0; i < stealTries; i++ {
3706 stealTimersOrRunNextG := i == stealTries-1
3707
3708 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3709 if sched.gcwaiting.Load() {
3710
3711 return nil, false, now, pollUntil, true
3712 }
3713 p2 := allp[enum.position()]
3714 if pp == p2 {
3715 continue
3716 }
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3732 tnow, w, ran := p2.timers.check(now)
3733 now = tnow
3734 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3735 pollUntil = w
3736 }
3737 if ran {
3738
3739
3740
3741
3742
3743
3744
3745
3746 if gp, inheritTime := runqget(pp); gp != nil {
3747 return gp, inheritTime, now, pollUntil, ranTimer
3748 }
3749 ranTimer = true
3750 }
3751 }
3752
3753
3754 if !idlepMask.read(enum.position()) {
3755 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3756 return gp, false, now, pollUntil, ranTimer
3757 }
3758 }
3759 }
3760 }
3761
3762
3763
3764
3765 return nil, false, now, pollUntil, ranTimer
3766 }
3767
3768
3769
3770
3771
3772
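// checkRunqsNoP scans allpSnapshot for a P with a non-empty run queue
// while the caller holds no P. If one is found, an idle P is acquired
// and returned for the caller to steal into; otherwise it returns nil.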
3773 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3774 for id, p2 := range allpSnapshot {
3775 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3776 lock(&sched.lock)
3777 pp, _ := pidlegetSpinning(0)
3778 if pp == nil {
3779
3780 unlock(&sched.lock)
3781 return nil
3782 }
3783 unlock(&sched.lock)
3784 return pp
3785 }
3786 }
3787
3788
3789 return nil
3790 }
3791
3792
3793
3794
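// checkTimersNoP scans allpSnapshot for the earliest timer on any P
// that may have timers, and returns the resulting pollUntil value.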
3795 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3796 for id, p2 := range allpSnapshot {
3797 if timerpMaskSnapshot.read(uint32(id)) {
3798 w := p2.timers.wakeTime()
3799 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3800 pollUntil = w
3801 }
3802 }
3803 }
3804
3805 return pollUntil
3806 }
3807
3808
3809
3810
3811
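// checkIdleGCNoP checks for idle-priority GC mark work while the
// caller holds no P. If work is available, it acquires an idle P and
// a background mark worker G and returns both; otherwise nil, nil.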
3812 func checkIdleGCNoP() (*p, *g) {
3813
3814
3815
3816
3817
3818
3819 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3820 return nil, nil
3821 }
3822 if !gcMarkWorkAvailable(nil) {
3823 return nil, nil
3824 }
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843 lock(&sched.lock)
3844 pp, now := pidlegetSpinning(0)
3845 if pp == nil {
3846 unlock(&sched.lock)
3847 return nil, nil
3848 }
3849
3850
3851 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3852 pidleput(pp, now)
3853 unlock(&sched.lock)
3854 return nil, nil
3855 }
3856
3857 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3858 if node == nil {
3859 pidleput(pp, now)
3860 unlock(&sched.lock)
3861 gcController.removeIdleMarkWorker()
3862 return nil, nil
3863 }
3864
3865 unlock(&sched.lock)
3866
3867 return pp, node.gp.ptr()
3868 }
3869
3870
3871
3872
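// wakeNetPoller wakes up the thread sleeping in the network poller if
// it isn't going to wake up before the when argument, or it wakes an
// idle P to service timers and the network poller if there isn't one
// already.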
3873 func wakeNetPoller(when int64) {
3874 if sched.lastpoll.Load() == 0 {
3875
3876
3877
3878
3879 pollerPollUntil := sched.pollUntil.Load()
3880 if pollerPollUntil == 0 || pollerPollUntil > when {
3881 netpollBreak()
3882 }
3883 } else {
3884
3885
3886 if GOOS != "plan9" {
3887 wakep()
3888 }
3889 }
3890 }
3891
3892 func resetspinning() {
3893 gp := getg()
3894 if !gp.m.spinning {
3895 throw("resetspinning: not a spinning m")
3896 }
3897 gp.m.spinning = false
3898 nmspinning := sched.nmspinning.Add(-1)
3899 if nmspinning < 0 {
3900 throw("findrunnable: negative nmspinning")
3901 }
3902
3903
3904
3905 wakep()
3906 }
3907
3908
3909
3910
3911
3912
3913
3914
3915
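// injectglist adds each runnable G on the list to some run queue and
// clears glist. If there is no current P, they all go on the global
// queue and up to qsize idle Ms are started to run them. Otherwise,
// one G per idle P is pushed to the global queue (starting an M for
// each) and the remainder go on the current P's local run queue.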
3916 func injectglist(glist *gList) {
3917 if glist.empty() {
3918 return
3919 }
3920
3921
3922
3923 head := glist.head.ptr()
3924 var tail *g
3925 qsize := 0
3926 trace := traceAcquire()
3927 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3928 tail = gp
3929 qsize++
3930 casgstatus(gp, _Gwaiting, _Grunnable)
3931 if trace.ok() {
3932 trace.GoUnpark(gp, 0)
3933 }
3934 }
3935 if trace.ok() {
3936 traceRelease(trace)
3937 }
3938
3939
3940 var q gQueue
3941 q.head.set(head)
3942 q.tail.set(tail)
3943 *glist = gList{}
3944
3945 startIdle := func(n int) {
3946 for i := 0; i < n; i++ {
3947 mp := acquirem()
3948 lock(&sched.lock)
3949
3950 pp, _ := pidlegetSpinning(0)
3951 if pp == nil {
3952 unlock(&sched.lock)
3953 releasem(mp)
3954 break
3955 }
3956
3957 startm(pp, false, true)
3958 unlock(&sched.lock)
3959 releasem(mp)
3960 }
3961 }
3962
3963 pp := getg().m.p.ptr()
3964 if pp == nil {
3965 lock(&sched.lock)
3966 globrunqputbatch(&q, int32(qsize))
3967 unlock(&sched.lock)
3968 startIdle(qsize)
3969 return
3970 }
3971
3972 npidle := int(sched.npidle.Load())
3973 var (
3974 globq gQueue
3975 n int
3976 )
3977 for n = 0; n < npidle && !q.empty(); n++ {
3978 g := q.pop()
3979 globq.pushBack(g)
3980 }
3981 if n > 0 {
3982 lock(&sched.lock)
3983 globrunqputbatch(&globq, int32(n))
3984 unlock(&sched.lock)
3985 startIdle(n)
3986 qsize -= n
3987 }
3988
3989 if !q.empty() {
3990 runqputbatch(pp, &q, qsize)
3991 }
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006 wakep()
4007 }
4008
4009
4010
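// schedule runs one round of the scheduler: find a runnable goroutine
// and execute it. It never returns.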
4011 func schedule() {
4012 mp := getg().m
4013
4014 if mp.locks != 0 {
4015 throw("schedule: holding locks")
4016 }
4017
4018 if mp.lockedg != 0 {
4019 stoplockedm()
4020 execute(mp.lockedg.ptr(), false)
4021 }
4022
4023
4024
4025 if mp.incgo {
4026 throw("schedule: in cgo")
4027 }
4028
4029 top:
4030 pp := mp.p.ptr()
4031 pp.preempt = false
4032
4033
4034
4035
4036 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4037 throw("schedule: spinning with local work")
4038 }
4039
4040 gp, inheritTime, tryWakeP := findRunnable()
4041
4042 if debug.dontfreezetheworld > 0 && freezing.Load() {
4043
4044
4045
4046
4047
4048
4049
4050 lock(&deadlock)
4051 lock(&deadlock)
4052 }
4053
4054
4055
4056
4057 if mp.spinning {
4058 resetspinning()
4059 }
4060
4061 if sched.disable.user && !schedEnabled(gp) {
4062
4063
4064
4065 lock(&sched.lock)
4066 if schedEnabled(gp) {
4067
4068
4069 unlock(&sched.lock)
4070 } else {
4071 sched.disable.runnable.pushBack(gp)
4072 sched.disable.n++
4073 unlock(&sched.lock)
4074 goto top
4075 }
4076 }
4077
4078
4079
4080 if tryWakeP {
4081 wakep()
4082 }
4083 if gp.lockedm != 0 {
4084
4085
4086 startlockedm(gp)
4087 goto top
4088 }
4089
4090 execute(gp, inheritTime)
4091 }
4092
4093
4094
4095
4096
4097
4098
4099
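// dropg removes the association between the current M and its curg.
// Typically the caller sets the G's status away from _Grunning and
// then immediately calls dropg to finish the job.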
4100 func dropg() {
4101 gp := getg()
4102
4103 setMNoWB(&gp.m.curg.m, nil)
4104 setGNoWB(&gp.m.curg, nil)
4105 }
4106
4107 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4108 unlock((*mutex)(lock))
4109 return true
4110 }
4111
4112
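// park_m is the gopark continuation, running on g0: it moves gp from
// _Grunning to _Gwaiting, drops it from the M, runs the wait-unlock
// function (re-running gp immediately if that fails), and then calls
// schedule.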
4113 func park_m(gp *g) {
4114 mp := getg().m
4115
4116 trace := traceAcquire()
4117
4118
4119
4120
4121
4122 sg := gp.syncGroup
4123 if sg != nil {
4124 sg.incActive()
4125 }
4126
4127 if trace.ok() {
4128
4129
4130
4131 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4132 }
4133
4134
4135 casgstatus(gp, _Grunning, _Gwaiting)
4136 if trace.ok() {
4137 traceRelease(trace)
4138 }
4139
4140 dropg()
4141
4142 if fn := mp.waitunlockf; fn != nil {
4143 ok := fn(gp, mp.waitlock)
4144 mp.waitunlockf = nil
4145 mp.waitlock = nil
4146 if !ok {
4147 trace := traceAcquire()
4148 casgstatus(gp, _Gwaiting, _Grunnable)
4149 if sg != nil {
4150 sg.decActive()
4151 }
4152 if trace.ok() {
4153 trace.GoUnpark(gp, 2)
4154 traceRelease(trace)
4155 }
4156 execute(gp, true)
4157 }
4158 }
4159
4160 if sg != nil {
4161 sg.decActive()
4162 }
4163
4164 schedule()
4165 }
4166
4167 func goschedImpl(gp *g, preempted bool) {
4168 trace := traceAcquire()
4169 status := readgstatus(gp)
4170 if status&^_Gscan != _Grunning {
4171 dumpgstatus(gp)
4172 throw("bad g status")
4173 }
4174 if trace.ok() {
4175
4176
4177
4178 if preempted {
4179 trace.GoPreempt()
4180 } else {
4181 trace.GoSched()
4182 }
4183 }
4184 casgstatus(gp, _Grunning, _Grunnable)
4185 if trace.ok() {
4186 traceRelease(trace)
4187 }
4188
4189 dropg()
4190 lock(&sched.lock)
4191 globrunqput(gp)
4192 unlock(&sched.lock)
4193
4194 if mainStarted {
4195 wakep()
4196 }
4197
4198 schedule()
4199 }
4200
4201
4202 func gosched_m(gp *g) {
4203 goschedImpl(gp, false)
4204 }
4205
4206
4207 func goschedguarded_m(gp *g) {
4208 if !canPreemptM(gp.m) {
4209 gogo(&gp.sched)
4210 }
4211 goschedImpl(gp, false)
4212 }
4213
4214 func gopreempt_m(gp *g) {
4215 goschedImpl(gp, true)
4216 }
4217
4218
4219
4220
4221 func preemptPark(gp *g) {
4222 status := readgstatus(gp)
4223 if status&^_Gscan != _Grunning {
4224 dumpgstatus(gp)
4225 throw("bad g status")
4226 }
4227
4228 if gp.asyncSafePoint {
4229
4230
4231
4232 f := findfunc(gp.sched.pc)
4233 if !f.valid() {
4234 throw("preempt at unknown pc")
4235 }
4236 if f.flag&abi.FuncFlagSPWrite != 0 {
4237 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4238 throw("preempt SPWRITE")
4239 }
4240 }
4241
4242
4243
4244
4245
4246
4247
4248 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4249 dropg()
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266 trace := traceAcquire()
4267 if trace.ok() {
4268 trace.GoPark(traceBlockPreempted, 0)
4269 }
4270 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4271 if trace.ok() {
4272 traceRelease(trace)
4273 }
4274 schedule()
4275 }
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
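// goyield is like Gosched, but it emits a GoPreempt trace event
// instead of GoSched and puts the current G on the local run queue of
// the current P instead of the global queue.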
4291 func goyield() {
4292 checkTimeouts()
4293 mcall(goyield_m)
4294 }
4295
4296 func goyield_m(gp *g) {
4297 trace := traceAcquire()
4298 pp := gp.m.p.ptr()
4299 if trace.ok() {
4300
4301
4302
4303 trace.GoPreempt()
4304 }
4305 casgstatus(gp, _Grunning, _Grunnable)
4306 if trace.ok() {
4307 traceRelease(trace)
4308 }
4309 dropg()
4310 runqput(pp, gp, false)
4311 schedule()
4312 }
4313
4314
4315 func goexit1() {
4316 if raceenabled {
4317 if gp := getg(); gp.syncGroup != nil {
4318 racereleasemergeg(gp, gp.syncGroup.raceaddr())
4319 }
4320 racegoend()
4321 }
4322 trace := traceAcquire()
4323 if trace.ok() {
4324 trace.GoEnd()
4325 traceRelease(trace)
4326 }
4327 mcall(goexit0)
4328 }
4329
4330
4331 func goexit0(gp *g) {
4332 gdestroy(gp)
4333 schedule()
4334 }
4335
4336 func gdestroy(gp *g) {
4337 mp := getg().m
4338 pp := mp.p.ptr()
4339
4340 casgstatus(gp, _Grunning, _Gdead)
4341 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4342 if isSystemGoroutine(gp, false) {
4343 sched.ngsys.Add(-1)
4344 }
4345 gp.m = nil
4346 locked := gp.lockedm != 0
4347 gp.lockedm = 0
4348 mp.lockedg = 0
4349 gp.preemptStop = false
4350 gp.paniconfault = false
4351 gp._defer = nil
4352 gp._panic = nil
4353 gp.writebuf = nil
4354 gp.waitreason = waitReasonZero
4355 gp.param = nil
4356 gp.labels = nil
4357 gp.timer = nil
4358 gp.syncGroup = nil
4359
4360 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4361
4362
4363
4364 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4365 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4366 gcController.bgScanCredit.Add(scanCredit)
4367 gp.gcAssistBytes = 0
4368 }
4369
4370 dropg()
4371
4372 if GOARCH == "wasm" {
4373 gfput(pp, gp)
4374 return
4375 }
4376
4377 if locked && mp.lockedInt != 0 {
4378 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4379 if mp.isextra {
4380 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4381 }
4382 throw("exited a goroutine internally locked to the OS thread")
4383 }
4384 gfput(pp, gp)
4385 if locked {
4386
4387
4388
4389
4390
4391
4392 if GOOS != "plan9" {
4393 gogo(&mp.g0.sched)
4394 } else {
4395
4396
4397 mp.lockedExt = 0
4398 }
4399 }
4400 }
4401
4402
4403
4404
4405
4406
4407
4408
4409
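// save updates getg().sched to refer to pc, sp, and bp so that a
// following gogo will restore them. It must not be called on the g0
// or gsignal stacks.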
4410 func save(pc, sp, bp uintptr) {
4411 gp := getg()
4412
4413 if gp == gp.m.g0 || gp == gp.m.gsignal {
4414
4415
4416
4417
4418
4419 throw("save on system g not allowed")
4420 }
4421
4422 gp.sched.pc = pc
4423 gp.sched.sp = sp
4424 gp.sched.lr = 0
4425 gp.sched.bp = bp
4426
4427
4428
4429 if gp.sched.ctxt != nil {
4430 badctxt()
4431 }
4432 }
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
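// reentersyscall is the common syscall entry path: it saves the
// caller's PC, SP, and BP, moves the G from _Grunning to _Gsyscall,
// and leaves its P in _Psyscall so sysmon can retake it if the
// syscall blocks. Stack splits are not allowed while in this state.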
4458 func reentersyscall(pc, sp, bp uintptr) {
4459 trace := traceAcquire()
4460 gp := getg()
4461
4462
4463
4464 gp.m.locks++
4465
4466
4467
4468
4469
4470 gp.stackguard0 = stackPreempt
4471 gp.throwsplit = true
4472
4473
4474 save(pc, sp, bp)
4475 gp.syscallsp = sp
4476 gp.syscallpc = pc
4477 gp.syscallbp = bp
4478 casgstatus(gp, _Grunning, _Gsyscall)
4479 if staticLockRanking {
4480
4481
4482 save(pc, sp, bp)
4483 }
4484 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4485 systemstack(func() {
4486 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4487 throw("entersyscall")
4488 })
4489 }
4490 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4491 systemstack(func() {
4492 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4493 throw("entersyscall")
4494 })
4495 }
4496
4497 if trace.ok() {
4498 systemstack(func() {
4499 trace.GoSysCall()
4500 traceRelease(trace)
4501 })
4502
4503
4504
4505 save(pc, sp, bp)
4506 }
4507
4508 if sched.sysmonwait.Load() {
4509 systemstack(entersyscall_sysmon)
4510 save(pc, sp, bp)
4511 }
4512
4513 if gp.m.p.ptr().runSafePointFn != 0 {
4514
4515 systemstack(runSafePointFn)
4516 save(pc, sp, bp)
4517 }
4518
4519 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4520 pp := gp.m.p.ptr()
4521 pp.m = 0
4522 gp.m.oldp.set(pp)
4523 gp.m.p = 0
4524 atomic.Store(&pp.status, _Psyscall)
4525 if sched.gcwaiting.Load() {
4526 systemstack(entersyscall_gcwait)
4527 save(pc, sp, bp)
4528 }
4529
4530 gp.m.locks--
4531 }
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
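// entersyscall is the standard syscall entry used by the go syscall
// library and normal cgo calls; it records the caller's PC, SP, and
// frame pointer and delegates to reentersyscall.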
4547 func entersyscall() {
4548
4549
4550
4551
4552 fp := getcallerfp()
4553 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4554 }
4555
4556 func entersyscall_sysmon() {
4557 lock(&sched.lock)
4558 if sched.sysmonwait.Load() {
4559 sched.sysmonwait.Store(false)
4560 notewakeup(&sched.sysmonnote)
4561 }
4562 unlock(&sched.lock)
4563 }
4564
4565 func entersyscall_gcwait() {
4566 gp := getg()
4567 pp := gp.m.oldp.ptr()
4568
4569 lock(&sched.lock)
4570 trace := traceAcquire()
4571 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4572 if trace.ok() {
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582 trace.ProcSteal(pp, true)
4583 traceRelease(trace)
4584 }
4585 pp.gcStopTime = nanotime()
4586 pp.syscalltick++
4587 if sched.stopwait--; sched.stopwait == 0 {
4588 notewakeup(&sched.stopnote)
4589 }
4590 } else if trace.ok() {
4591 traceRelease(trace)
4592 }
4593 unlock(&sched.lock)
4594 }
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
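// entersyscallblock is like entersyscall, but for syscalls known to
// block: it hands the P off immediately rather than leaving it in
// _Psyscall for sysmon to retake.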
4608 func entersyscallblock() {
4609 gp := getg()
4610
4611 gp.m.locks++
4612 gp.throwsplit = true
4613 gp.stackguard0 = stackPreempt
4614 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4615 gp.m.p.ptr().syscalltick++
4616
4617
4618 pc := sys.GetCallerPC()
4619 sp := sys.GetCallerSP()
4620 bp := getcallerfp()
4621 save(pc, sp, bp)
4622 gp.syscallsp = gp.sched.sp
4623 gp.syscallpc = gp.sched.pc
4624 gp.syscallbp = gp.sched.bp
4625 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4626 sp1 := sp
4627 sp2 := gp.sched.sp
4628 sp3 := gp.syscallsp
4629 systemstack(func() {
4630 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4631 throw("entersyscallblock")
4632 })
4633 }
4634 casgstatus(gp, _Grunning, _Gsyscall)
4635 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4636 systemstack(func() {
4637 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4638 throw("entersyscallblock")
4639 })
4640 }
4641 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4642 systemstack(func() {
4643 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4644 throw("entersyscallblock")
4645 })
4646 }
4647
4648 systemstack(entersyscallblock_handoff)
4649
4650
4651 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4652
4653 gp.m.locks--
4654 }
4655
4656 func entersyscallblock_handoff() {
4657 trace := traceAcquire()
4658 if trace.ok() {
4659 trace.GoSysCall()
4660 traceRelease(trace)
4661 }
4662 handoffp(releasep())
4663 }
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
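// exitsyscall is called when the goroutine exits its system call. It
// tries the fast path of reacquiring a P directly (exitsyscallfast);
// otherwise it falls back to exitsyscall0 on g0, which queues the
// goroutine and blocks the M.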
4685 func exitsyscall() {
4686 gp := getg()
4687
4688 gp.m.locks++
4689 if sys.GetCallerSP() > gp.syscallsp {
4690 throw("exitsyscall: syscall frame is no longer valid")
4691 }
4692
4693 gp.waitsince = 0
4694 oldp := gp.m.oldp.ptr()
4695 gp.m.oldp = 0
4696 if exitsyscallfast(oldp) {
4697
4698
4699 if goroutineProfile.active {
4700
4701
4702
4703 systemstack(func() {
4704 tryRecordGoroutineProfileWB(gp)
4705 })
4706 }
4707 trace := traceAcquire()
4708 if trace.ok() {
4709 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4710 systemstack(func() {
4711
4712
4713
4714
4715 trace.GoSysExit(lostP)
4716 if lostP {
4717
4718
4719
4720
4721 trace.GoStart()
4722 }
4723 })
4724 }
4725
4726 gp.m.p.ptr().syscalltick++
4727
4728 casgstatus(gp, _Gsyscall, _Grunning)
4729 if trace.ok() {
4730 traceRelease(trace)
4731 }
4732
4733
4734
4735 gp.syscallsp = 0
4736 gp.m.locks--
4737 if gp.preempt {
4738
4739 gp.stackguard0 = stackPreempt
4740 } else {
4741
4742 gp.stackguard0 = gp.stack.lo + stackGuard
4743 }
4744 gp.throwsplit = false
4745
4746 if sched.disable.user && !schedEnabled(gp) {
4747
4748 Gosched()
4749 }
4750
4751 return
4752 }
4753
4754 gp.m.locks--
4755
4756
4757 mcall(exitsyscall0)
4758
4759
4760
4761
4762
4763
4764
4765 gp.syscallsp = 0
4766 gp.m.p.ptr().syscalltick++
4767 gp.throwsplit = false
4768 }
4769
4770
4771 func exitsyscallfast(oldp *p) bool {
4772
4773 if sched.stopwait == freezeStopWait {
4774 return false
4775 }
4776
4777
4778 trace := traceAcquire()
4779 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4780
4781 wirep(oldp)
4782 exitsyscallfast_reacquired(trace)
4783 if trace.ok() {
4784 traceRelease(trace)
4785 }
4786 return true
4787 }
4788 if trace.ok() {
4789 traceRelease(trace)
4790 }
4791
4792
4793 if sched.pidle != 0 {
4794 var ok bool
4795 systemstack(func() {
4796 ok = exitsyscallfast_pidle()
4797 })
4798 if ok {
4799 return true
4800 }
4801 }
4802 return false
4803 }
4804
4805
4806
4807
4808
4809
4810 func exitsyscallfast_reacquired(trace traceLocker) {
4811 gp := getg()
4812 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4813 if trace.ok() {
4814
4815
4816
4817 systemstack(func() {
4818
4819
4820 trace.ProcSteal(gp.m.p.ptr(), true)
4821 trace.ProcStart()
4822 })
4823 }
4824 gp.m.p.ptr().syscalltick++
4825 }
4826 }
4827
4828 func exitsyscallfast_pidle() bool {
4829 lock(&sched.lock)
4830 pp, _ := pidleget(0)
4831 if pp != nil && sched.sysmonwait.Load() {
4832 sched.sysmonwait.Store(false)
4833 notewakeup(&sched.sysmonnote)
4834 }
4835 unlock(&sched.lock)
4836 if pp != nil {
4837 acquirep(pp)
4838 return true
4839 }
4840 return false
4841 }
4842
4843
4844
4845
4846
4847
4848
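// exitsyscall0 is the exitsyscall slow path, running on g0 via mcall:
// it makes gp runnable and either acquires an idle P and runs gp
// immediately, or puts gp on the global run queue and parks the M.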
4849 func exitsyscall0(gp *g) {
4850 var trace traceLocker
4851 traceExitingSyscall()
4852 trace = traceAcquire()
4853 casgstatus(gp, _Gsyscall, _Grunnable)
4854 traceExitedSyscall()
4855 if trace.ok() {
4856
4857
4858
4859
4860 trace.GoSysExit(true)
4861 traceRelease(trace)
4862 }
4863 dropg()
4864 lock(&sched.lock)
4865 var pp *p
4866 if schedEnabled(gp) {
4867 pp, _ = pidleget(0)
4868 }
4869 var locked bool
4870 if pp == nil {
4871 globrunqput(gp)
4872
4873
4874
4875
4876
4877
4878 locked = gp.lockedm != 0
4879 } else if sched.sysmonwait.Load() {
4880 sched.sysmonwait.Store(false)
4881 notewakeup(&sched.sysmonnote)
4882 }
4883 unlock(&sched.lock)
4884 if pp != nil {
4885 acquirep(pp)
4886 execute(gp, false)
4887 }
4888 if locked {
4889
4890
4891
4892
4893 stoplockedm()
4894 execute(gp, false)
4895 }
4896 stopm()
4897 schedule()
4898 }
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912 func syscall_runtime_BeforeFork() {
4913 gp := getg().m.curg
4914
4915
4916
4917
4918 gp.m.locks++
4919 sigsave(&gp.m.sigmask)
4920 sigblock(false)
4921
4922
4923
4924
4925
4926 gp.stackguard0 = stackFork
4927 }
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941 func syscall_runtime_AfterFork() {
4942 gp := getg().m.curg
4943
4944
4945 gp.stackguard0 = gp.stack.lo + stackGuard
4946
4947 msigrestore(gp.m.sigmask)
4948
4949 gp.m.locks--
4950 }
4951
4952
4953
4954 var inForkedChild bool
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975 func syscall_runtime_AfterForkInChild() {
4976
4977
4978
4979
4980 inForkedChild = true
4981
4982 clearSignalHandlers()
4983
4984
4985
4986 msigrestore(getg().m.sigmask)
4987
4988 inForkedChild = false
4989 }
4990
4991
4992
4993
4994 var pendingPreemptSignals atomic.Int32
4995
4996
4997
4998
4999 func syscall_runtime_BeforeExec() {
5000
5001 execLock.lock()
5002
5003
5004
5005 if GOOS == "darwin" || GOOS == "ios" {
5006 for pendingPreemptSignals.Load() > 0 {
5007 osyield()
5008 }
5009 }
5010 }
5011
5012
5013
5014
5015 func syscall_runtime_AfterExec() {
5016 execLock.unlock()
5017 }
5018
5019
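// malg allocates a new g with a stack big enough for stacksize bytes.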
5020 func malg(stacksize int32) *g {
5021 newg := new(g)
5022 if stacksize >= 0 {
5023 stacksize = round2(stackSystem + stacksize)
5024 systemstack(func() {
5025 newg.stack = stackalloc(uint32(stacksize))
5026 })
5027 newg.stackguard0 = newg.stack.lo + stackGuard
5028 newg.stackguard1 = ^uintptr(0)
5029
5030
5031 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5032 }
5033 return newg
5034 }
5035
5036
5037
5038
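// newproc creates a new g running fn and puts it on the run queue of
// the current P. The compiler turns a go statement into a call to
// this function.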
5039 func newproc(fn *funcval) {
5040 gp := getg()
5041 pc := sys.GetCallerPC()
5042 systemstack(func() {
5043 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5044
5045 pp := getg().m.p.ptr()
5046 runqput(pp, newg, true)
5047
5048 if mainStarted {
5049 wakep()
5050 }
5051 })
5052 }
5053
5054
5055
5056
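// newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked
// is true), starting at fn. callerpc is the address of the go
// statement that created it. The caller is responsible for adding the
// new g to the scheduler.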
5057 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5058 if fn == nil {
5059 fatal("go of nil func value")
5060 }
5061
5062 mp := acquirem()
5063 pp := mp.p.ptr()
5064 newg := gfget(pp)
5065 if newg == nil {
5066 newg = malg(stackMin)
5067 casgstatus(newg, _Gidle, _Gdead)
5068 allgadd(newg)
5069 }
5070 if newg.stack.hi == 0 {
5071 throw("newproc1: newg missing stack")
5072 }
5073
5074 if readgstatus(newg) != _Gdead {
5075 throw("newproc1: new g is not Gdead")
5076 }
5077
5078 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5079 totalSize = alignUp(totalSize, sys.StackAlign)
5080 sp := newg.stack.hi - totalSize
5081 if usesLR {
5082
5083 *(*uintptr)(unsafe.Pointer(sp)) = 0
5084 prepGoExitFrame(sp)
5085 }
5086 if GOARCH == "arm64" {
5087
5088 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5089 }
5090
5091 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5092 newg.sched.sp = sp
5093 newg.stktopsp = sp
5094 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5095 newg.sched.g = guintptr(unsafe.Pointer(newg))
5096 gostartcallfn(&newg.sched, fn)
5097 newg.parentGoid = callergp.goid
5098 newg.gopc = callerpc
5099 newg.ancestors = saveAncestors(callergp)
5100 newg.startpc = fn.fn
5101 if isSystemGoroutine(newg, false) {
5102 sched.ngsys.Add(1)
5103 } else {
5104
5105 newg.syncGroup = callergp.syncGroup
5106 if mp.curg != nil {
5107 newg.labels = mp.curg.labels
5108 }
5109 if goroutineProfile.active {
5110
5111
5112
5113
5114
5115 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5116 }
5117 }
5118
5119 newg.trackingSeq = uint8(cheaprand())
5120 if newg.trackingSeq%gTrackingPeriod == 0 {
5121 newg.tracking = true
5122 }
5123 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5124
5125
5126 trace := traceAcquire()
5127 var status uint32 = _Grunnable
5128 if parked {
5129 status = _Gwaiting
5130 newg.waitreason = waitreason
5131 }
5132 if pp.goidcache == pp.goidcacheend {
5133
5134
5135
5136 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5137 pp.goidcache -= _GoidCacheBatch - 1
5138 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5139 }
5140 newg.goid = pp.goidcache
5141 casgstatus(newg, _Gdead, status)
5142 pp.goidcache++
5143 newg.trace.reset()
5144 if trace.ok() {
5145 trace.GoCreate(newg, newg.startpc, parked)
5146 traceRelease(trace)
5147 }
5148
5149
5150 if raceenabled {
5151 newg.racectx = racegostart(callerpc)
5152 newg.raceignore = 0
5153 if newg.labels != nil {
5154
5155
5156 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5157 }
5158 }
5159 releasem(mp)
5160
5161 return newg
5162 }
5163
5164
5165
5166
5167 func saveAncestors(callergp *g) *[]ancestorInfo {
5168
5169 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5170 return nil
5171 }
5172 var callerAncestors []ancestorInfo
5173 if callergp.ancestors != nil {
5174 callerAncestors = *callergp.ancestors
5175 }
5176 n := int32(len(callerAncestors)) + 1
5177 if n > debug.tracebackancestors {
5178 n = debug.tracebackancestors
5179 }
5180 ancestors := make([]ancestorInfo, n)
5181 copy(ancestors[1:], callerAncestors)
5182
5183 var pcs [tracebackInnerFrames]uintptr
5184 npcs := gcallers(callergp, 0, pcs[:])
5185 ipcs := make([]uintptr, npcs)
5186 copy(ipcs, pcs[:])
5187 ancestors[0] = ancestorInfo{
5188 pcs: ipcs,
5189 goid: callergp.goid,
5190 gopc: callergp.gopc,
5191 }
5192
5193 ancestorsp := new([]ancestorInfo)
5194 *ancestorsp = ancestors
5195 return ancestorsp
5196 }
5197
5198
5199
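// gfput puts gp on the per-P free list of dead Gs. If the local list
// grows too long, a batch is transferred to the global free list.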
5200 func gfput(pp *p, gp *g) {
5201 if readgstatus(gp) != _Gdead {
5202 throw("gfput: bad status (not Gdead)")
5203 }
5204
5205 stksize := gp.stack.hi - gp.stack.lo
5206
5207 if stksize != uintptr(startingStackSize) {
5208
5209 stackfree(gp.stack)
5210 gp.stack.lo = 0
5211 gp.stack.hi = 0
5212 gp.stackguard0 = 0
5213 }
5214
5215 pp.gFree.push(gp)
5216 pp.gFree.n++
5217 if pp.gFree.n >= 64 {
5218 var (
5219 inc int32
5220 stackQ gQueue
5221 noStackQ gQueue
5222 )
5223 for pp.gFree.n >= 32 {
5224 gp := pp.gFree.pop()
5225 pp.gFree.n--
5226 if gp.stack.lo == 0 {
5227 noStackQ.push(gp)
5228 } else {
5229 stackQ.push(gp)
5230 }
5231 inc++
5232 }
5233 lock(&sched.gFree.lock)
5234 sched.gFree.noStack.pushAll(noStackQ)
5235 sched.gFree.stack.pushAll(stackQ)
5236 sched.gFree.n += inc
5237 unlock(&sched.gFree.lock)
5238 }
5239 }
5240
5241
5242
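// gfget gets a dead g from the per-P free list, refilling from the
// global list if the local one is empty, and allocates a fresh stack
// for the g if needed. It returns nil if there are no free Gs.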
5243 func gfget(pp *p) *g {
5244 retry:
5245 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5246 lock(&sched.gFree.lock)
5247
5248 for pp.gFree.n < 32 {
5249
5250 gp := sched.gFree.stack.pop()
5251 if gp == nil {
5252 gp = sched.gFree.noStack.pop()
5253 if gp == nil {
5254 break
5255 }
5256 }
5257 sched.gFree.n--
5258 pp.gFree.push(gp)
5259 pp.gFree.n++
5260 }
5261 unlock(&sched.gFree.lock)
5262 goto retry
5263 }
5264 gp := pp.gFree.pop()
5265 if gp == nil {
5266 return nil
5267 }
5268 pp.gFree.n--
5269 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5270
5271
5272
5273 systemstack(func() {
5274 stackfree(gp.stack)
5275 gp.stack.lo = 0
5276 gp.stack.hi = 0
5277 gp.stackguard0 = 0
5278 })
5279 }
5280 if gp.stack.lo == 0 {
5281
5282 systemstack(func() {
5283 gp.stack = stackalloc(startingStackSize)
5284 })
5285 gp.stackguard0 = gp.stack.lo + stackGuard
5286 } else {
5287 if raceenabled {
5288 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5289 }
5290 if msanenabled {
5291 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5292 }
5293 if asanenabled {
5294 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5295 }
5296 }
5297 return gp
5298 }
5299
5300
5301 func gfpurge(pp *p) {
5302 var (
5303 inc int32
5304 stackQ gQueue
5305 noStackQ gQueue
5306 )
5307 for !pp.gFree.empty() {
5308 gp := pp.gFree.pop()
5309 pp.gFree.n--
5310 if gp.stack.lo == 0 {
5311 noStackQ.push(gp)
5312 } else {
5313 stackQ.push(gp)
5314 }
5315 inc++
5316 }
5317 lock(&sched.gFree.lock)
5318 sched.gFree.noStack.pushAll(noStackQ)
5319 sched.gFree.stack.pushAll(stackQ)
5320 sched.gFree.n += inc
5321 unlock(&sched.gFree.lock)
5322 }
5323
5324
5325 func Breakpoint() {
5326 breakpoint()
5327 }
5328
5329
5330
5331
5332
5333
5334 func dolockOSThread() {
5335 if GOARCH == "wasm" {
5336 return
5337 }
5338 gp := getg()
5339 gp.m.lockedg.set(gp)
5340 gp.lockedm.set(gp.m)
5341 }
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359 func LockOSThread() {
5360 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5361
5362
5363
5364 startTemplateThread()
5365 }
5366 gp := getg()
5367 gp.m.lockedExt++
5368 if gp.m.lockedExt == 0 {
5369 gp.m.lockedExt--
5370 panic("LockOSThread nesting overflow")
5371 }
5372 dolockOSThread()
5373 }
5374
5375
5376 func lockOSThread() {
5377 getg().m.lockedInt++
5378 dolockOSThread()
5379 }
5380
5381
5382
5383
5384
5385
5386 func dounlockOSThread() {
5387 if GOARCH == "wasm" {
5388 return
5389 }
5390 gp := getg()
5391 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5392 return
5393 }
5394 gp.m.lockedg = 0
5395 gp.lockedm = 0
5396 }
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412 func UnlockOSThread() {
5413 gp := getg()
5414 if gp.m.lockedExt == 0 {
5415 return
5416 }
5417 gp.m.lockedExt--
5418 dounlockOSThread()
5419 }
5420
5421
5422 func unlockOSThread() {
5423 gp := getg()
5424 if gp.m.lockedInt == 0 {
5425 systemstack(badunlockosthread)
5426 }
5427 gp.m.lockedInt--
5428 dounlockOSThread()
5429 }
5430
5431 func badunlockosthread() {
5432 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5433 }
5434
5435 func gcount() int32 {
5436 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5437 for _, pp := range allp {
5438 n -= pp.gFree.n
5439 }
5440
5441
5442
5443 if n < 1 {
5444 n = 1
5445 }
5446 return n
5447 }
5448
5449 func mcount() int32 {
5450 return int32(sched.mnext - sched.nmfreed)
5451 }
5452
5453 var prof struct {
5454 signalLock atomic.Uint32
5455
5456
5457
5458 hz atomic.Int32
5459 }
5460
5461 func _System() { _System() }
5462 func _ExternalCode() { _ExternalCode() }
5463 func _LostExternalCode() { _LostExternalCode() }
5464 func _GC() { _GC() }
5465 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5466 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5467 func _VDSO() { _VDSO() }
5468
5469
5470
5471
5472
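// sigprof is called on receipt of a SIGPROF signal: it records a CPU
// profile sample for the interrupted goroutine, handling cgo, libcall,
// and VDSO frames specially. It may run during a stop-the-world.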
5473 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5474 if prof.hz.Load() == 0 {
5475 return
5476 }
5477
5478
5479
5480
5481 if mp != nil && mp.profilehz == 0 {
5482 return
5483 }
5484
5485
5486
5487
5488
5489
5490
5491 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5492 if f := findfunc(pc); f.valid() {
5493 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5494 cpuprof.lostAtomic++
5495 return
5496 }
5497 }
5498 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5499
5500
5501
5502 cpuprof.lostAtomic++
5503 return
5504 }
5505 }
5506
5507
5508
5509
5510
5511
5512
5513 getg().m.mallocing++
5514
5515 var u unwinder
5516 var stk [maxCPUProfStack]uintptr
5517 n := 0
5518 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5519 cgoOff := 0
5520
5521
5522
5523
5524
5525 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5526 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5527 cgoOff++
5528 }
5529 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5530 mp.cgoCallers[0] = 0
5531 }
5532
5533
5534 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5535 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5536
5537
5538 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5539 } else if mp != nil && mp.vdsoSP != 0 {
5540
5541
5542 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5543 } else {
5544 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5545 }
5546 n += tracebackPCs(&u, 0, stk[n:])
5547
5548 if n <= 0 {
5549
5550
5551 n = 2
5552 if inVDSOPage(pc) {
5553 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5554 } else if pc > firstmoduledata.etext {
5555
5556 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5557 }
5558 stk[0] = pc
5559 if mp.preemptoff != "" {
5560 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5561 } else {
5562 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5563 }
5564 }
5565
5566 if prof.hz.Load() != 0 {
5567
5568
5569
5570 var tagPtr *unsafe.Pointer
5571 if gp != nil && gp.m != nil && gp.m.curg != nil {
5572 tagPtr = &gp.m.curg.labels
5573 }
5574 cpuprof.add(tagPtr, stk[:n])
5575
5576 gprof := gp
5577 var mp *m
5578 var pp *p
5579 if gp != nil && gp.m != nil {
5580 if gp.m.curg != nil {
5581 gprof = gp.m.curg
5582 }
5583 mp = gp.m
5584 pp = gp.m.p.ptr()
5585 }
5586 traceCPUSample(gprof, mp, pp, stk[:n])
5587 }
5588 getg().m.mallocing--
5589 }
5590
5591
5592
5593 func setcpuprofilerate(hz int32) {
5594
5595 if hz < 0 {
5596 hz = 0
5597 }
5598
5599
5600
5601 gp := getg()
5602 gp.m.locks++
5603
5604
5605
5606
5607 setThreadCPUProfiler(0)
5608
5609 for !prof.signalLock.CompareAndSwap(0, 1) {
5610 osyield()
5611 }
5612 if prof.hz.Load() != hz {
5613 setProcessCPUProfiler(hz)
5614 prof.hz.Store(hz)
5615 }
5616 prof.signalLock.Store(0)
5617
5618 lock(&sched.lock)
5619 sched.profilehz = hz
5620 unlock(&sched.lock)
5621
5622 if hz != 0 {
5623 setThreadCPUProfiler(hz)
5624 }
5625
5626 gp.m.locks--
5627 }
5628
5629
5630
5631 func (pp *p) init(id int32) {
5632 pp.id = id
5633 pp.status = _Pgcstop
5634 pp.sudogcache = pp.sudogbuf[:0]
5635 pp.deferpool = pp.deferpoolbuf[:0]
5636 pp.wbBuf.reset()
5637 if pp.mcache == nil {
5638 if id == 0 {
5639 if mcache0 == nil {
5640 throw("missing mcache?")
5641 }
5642
5643
5644 pp.mcache = mcache0
5645 } else {
5646 pp.mcache = allocmcache()
5647 }
5648 }
5649 if raceenabled && pp.raceprocctx == 0 {
5650 if id == 0 {
5651 pp.raceprocctx = raceprocctx0
5652 raceprocctx0 = 0
5653 } else {
5654 pp.raceprocctx = raceproccreate()
5655 }
5656 }
5657 lockInit(&pp.timers.mu, lockRankTimers)
5658
5659
5660
5661 timerpMask.set(id)
5662
5663
5664 idlepMask.clear(id)
5665 }
5666
5667
5668
5669
5670
5671 func (pp *p) destroy() {
5672 assertLockHeld(&sched.lock)
5673 assertWorldStopped()
5674
5675
5676 for pp.runqhead != pp.runqtail {
5677
5678 pp.runqtail--
5679 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5680
5681 globrunqputhead(gp)
5682 }
5683 if pp.runnext != 0 {
5684 globrunqputhead(pp.runnext.ptr())
5685 pp.runnext = 0
5686 }
5687
5688
5689 getg().m.p.ptr().timers.take(&pp.timers)
5690
5691
5692 if gcphase != _GCoff {
5693 wbBufFlush1(pp)
5694 pp.gcw.dispose()
5695 }
5696 for i := range pp.sudogbuf {
5697 pp.sudogbuf[i] = nil
5698 }
5699 pp.sudogcache = pp.sudogbuf[:0]
5700 pp.pinnerCache = nil
5701 for j := range pp.deferpoolbuf {
5702 pp.deferpoolbuf[j] = nil
5703 }
5704 pp.deferpool = pp.deferpoolbuf[:0]
5705 systemstack(func() {
5706 for i := 0; i < pp.mspancache.len; i++ {
5707
5708 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5709 }
5710 pp.mspancache.len = 0
5711 lock(&mheap_.lock)
5712 pp.pcache.flush(&mheap_.pages)
5713 unlock(&mheap_.lock)
5714 })
5715 freemcache(pp.mcache)
5716 pp.mcache = nil
5717 gfpurge(pp)
5718 if raceenabled {
5719 if pp.timers.raceCtx != 0 {
5720
5721
5722
5723
5724
5725 mp := getg().m
5726 phold := mp.p.ptr()
5727 mp.p.set(pp)
5728
5729 racectxend(pp.timers.raceCtx)
5730 pp.timers.raceCtx = 0
5731
5732 mp.p.set(phold)
5733 }
5734 raceprocdestroy(pp.raceprocctx)
5735 pp.raceprocctx = 0
5736 }
5737 pp.gcAssistTime = 0
5738 pp.status = _Pdead
5739 }
5740
5741
5742
5743
5744
5745
5746
5747
5748
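// procresize changes the number of Ps to nprocs. sched.lock must be
// held and the world must be stopped. It returns the list of Ps with
// local work, which the caller must schedule.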
5749 func procresize(nprocs int32) *p {
5750 assertLockHeld(&sched.lock)
5751 assertWorldStopped()
5752
5753 old := gomaxprocs
5754 if old < 0 || nprocs <= 0 {
5755 throw("procresize: invalid arg")
5756 }
5757 trace := traceAcquire()
5758 if trace.ok() {
5759 trace.Gomaxprocs(nprocs)
5760 traceRelease(trace)
5761 }
5762
5763
5764 now := nanotime()
5765 if sched.procresizetime != 0 {
5766 sched.totaltime += int64(old) * (now - sched.procresizetime)
5767 }
5768 sched.procresizetime = now
5769
5770 maskWords := (nprocs + 31) / 32
5771
5772
5773 if nprocs > int32(len(allp)) {
5774
5775
5776 lock(&allpLock)
5777 if nprocs <= int32(cap(allp)) {
5778 allp = allp[:nprocs]
5779 } else {
5780 nallp := make([]*p, nprocs)
5781
5782
5783 copy(nallp, allp[:cap(allp)])
5784 allp = nallp
5785 }
5786
5787 if maskWords <= int32(cap(idlepMask)) {
5788 idlepMask = idlepMask[:maskWords]
5789 timerpMask = timerpMask[:maskWords]
5790 } else {
5791 nidlepMask := make([]uint32, maskWords)
5792
5793 copy(nidlepMask, idlepMask)
5794 idlepMask = nidlepMask
5795
5796 ntimerpMask := make([]uint32, maskWords)
5797 copy(ntimerpMask, timerpMask)
5798 timerpMask = ntimerpMask
5799 }
5800 unlock(&allpLock)
5801 }
5802
5803
5804 for i := old; i < nprocs; i++ {
5805 pp := allp[i]
5806 if pp == nil {
5807 pp = new(p)
5808 }
5809 pp.init(i)
5810 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5811 }
5812
5813 gp := getg()
5814 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5815
5816 gp.m.p.ptr().status = _Prunning
5817 gp.m.p.ptr().mcache.prepareForSweep()
5818 } else {
5819
5820
5821
5822
5823
5824 if gp.m.p != 0 {
5825 trace := traceAcquire()
5826 if trace.ok() {
5827
5828
5829
5830 trace.GoSched()
5831 trace.ProcStop(gp.m.p.ptr())
5832 traceRelease(trace)
5833 }
5834 gp.m.p.ptr().m = 0
5835 }
5836 gp.m.p = 0
5837 pp := allp[0]
5838 pp.m = 0
5839 pp.status = _Pidle
5840 acquirep(pp)
5841 trace := traceAcquire()
5842 if trace.ok() {
5843 trace.GoStart()
5844 traceRelease(trace)
5845 }
5846 }
5847
5848
5849 mcache0 = nil
5850
5851
5852 for i := nprocs; i < old; i++ {
5853 pp := allp[i]
5854 pp.destroy()
5855
5856 }
5857
5858
5859 if int32(len(allp)) != nprocs {
5860 lock(&allpLock)
5861 allp = allp[:nprocs]
5862 idlepMask = idlepMask[:maskWords]
5863 timerpMask = timerpMask[:maskWords]
5864 unlock(&allpLock)
5865 }
5866
5867 var runnablePs *p
5868 for i := nprocs - 1; i >= 0; i-- {
5869 pp := allp[i]
5870 if gp.m.p.ptr() == pp {
5871 continue
5872 }
5873 pp.status = _Pidle
5874 if runqempty(pp) {
5875 pidleput(pp, now)
5876 } else {
5877 pp.m.set(mget())
5878 pp.link.set(runnablePs)
5879 runnablePs = pp
5880 }
5881 }
5882 stealOrder.reset(uint32(nprocs))
5883 var int32p *int32 = &gomaxprocs
5884 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5885 if old != nprocs {
5886
5887 gcCPULimiter.resetCapacity(now, nprocs)
5888 }
5889 return runnablePs
5890 }
5891
5892
5893
5894
5895
5896
5897
5898 func acquirep(pp *p) {
5899
5900 wirep(pp)
5901
5902
5903
5904
5905
5906 pp.mcache.prepareForSweep()
5907
5908 trace := traceAcquire()
5909 if trace.ok() {
5910 trace.ProcStart()
5911 traceRelease(trace)
5912 }
5913 }
5914
5915
5916
5917
5918
5919
5920
5921 func wirep(pp *p) {
5922 gp := getg()
5923
5924 if gp.m.p != 0 {
5925
5926
5927 systemstack(func() {
5928 throw("wirep: already in go")
5929 })
5930 }
5931 if pp.m != 0 || pp.status != _Pidle {
5932
5933
5934 systemstack(func() {
5935 id := int64(0)
5936 if pp.m != 0 {
5937 id = pp.m.ptr().id
5938 }
5939 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5940 throw("wirep: invalid p state")
5941 })
5942 }
5943 gp.m.p.set(pp)
5944 pp.m.set(gp.m)
5945 pp.status = _Prunning
5946 }
5947
5948
5949 func releasep() *p {
5950 trace := traceAcquire()
5951 if trace.ok() {
5952 trace.ProcStop(getg().m.p.ptr())
5953 traceRelease(trace)
5954 }
5955 return releasepNoTrace()
5956 }
5957
5958
5959 func releasepNoTrace() *p {
5960 gp := getg()
5961
5962 if gp.m.p == 0 {
5963 throw("releasep: invalid arg")
5964 }
5965 pp := gp.m.p.ptr()
5966 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5967 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5968 throw("releasep: invalid p state")
5969 }
5970 gp.m.p = 0
5971 pp.m = 0
5972 pp.status = _Pidle
5973 return pp
5974 }
5975
5976 func incidlelocked(v int32) {
5977 lock(&sched.lock)
5978 sched.nmidlelocked += v
5979 if v > 0 {
5980 checkdead()
5981 }
5982 unlock(&sched.lock)
5983 }
5984
5985
5986
5987
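// checkdead checks for a deadlock: if no Ms are running user code and
// no goroutine is runnable or waiting on a timer, the program is
// deadlocked and the runtime throws. sched.lock must be held.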
5988 func checkdead() {
5989 assertLockHeld(&sched.lock)
5990
5991
5992
5993
5994
5995
5996 if (islibrary || isarchive) && GOARCH != "wasm" {
5997 return
5998 }
5999
6000
6001
6002
6003
6004 if panicking.Load() > 0 {
6005 return
6006 }
6007
6008
6009
6010
6011
6012 var run0 int32
6013 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6014 run0 = 1
6015 }
6016
6017 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6018 if run > run0 {
6019 return
6020 }
6021 if run < 0 {
6022 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6023 unlock(&sched.lock)
6024 throw("checkdead: inconsistent counts")
6025 }
6026
6027 grunning := 0
6028 forEachG(func(gp *g) {
6029 if isSystemGoroutine(gp, false) {
6030 return
6031 }
6032 s := readgstatus(gp)
6033 switch s &^ _Gscan {
6034 case _Gwaiting,
6035 _Gpreempted:
6036 grunning++
6037 case _Grunnable,
6038 _Grunning,
6039 _Gsyscall:
6040 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6041 unlock(&sched.lock)
6042 throw("checkdead: runnable g")
6043 }
6044 })
6045 if grunning == 0 {
6046 unlock(&sched.lock)
6047 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6048 }
6049
6050
6051 if faketime != 0 {
6052 if when := timeSleepUntil(); when < maxWhen {
6053 faketime = when
6054
6055
6056 pp, _ := pidleget(faketime)
6057 if pp == nil {
6058
6059
6060 unlock(&sched.lock)
6061 throw("checkdead: no p for timer")
6062 }
6063 mp := mget()
6064 if mp == nil {
6065
6066
6067 unlock(&sched.lock)
6068 throw("checkdead: no m for timer")
6069 }
6070
6071
6072
6073 sched.nmspinning.Add(1)
6074 mp.spinning = true
6075 mp.nextp.set(pp)
6076 notewakeup(&mp.park)
6077 return
6078 }
6079 }
6080
6081
6082 for _, pp := range allp {
6083 if len(pp.timers.heap) > 0 {
6084 return
6085 }
6086 }
6087
6088 unlock(&sched.lock)
6089 fatal("all goroutines are asleep - deadlock!")
6090 }
6091
6092
6093
6094
6095
6096
6097 var forcegcperiod int64 = 2 * 60 * 1e9
6098
6099
6100
6101 var needSysmonWorkaround bool = false
6102
6103
6104
6105
6106 const haveSysmon = GOARCH != "wasm"
6107
6108
6109
6110
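// sysmon is the system monitor: it runs on a dedicated M without a P,
// periodically polling the network, retaking Ps blocked in syscalls,
// preempting long-running goroutines, and waking the forced GC and
// scavenger. Because it has no P, write barriers are not allowed.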
6111 func sysmon() {
6112 lock(&sched.lock)
6113 sched.nmsys++
6114 checkdead()
6115 unlock(&sched.lock)
6116
6117 lasttrace := int64(0)
6118 idle := 0
6119 delay := uint32(0)
6120
6121 for {
6122 if idle == 0 {
6123 delay = 20
6124 } else if idle > 50 {
6125 delay *= 2
6126 }
6127 if delay > 10*1000 {
6128 delay = 10 * 1000
6129 }
6130 usleep(delay)
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147 now := nanotime()
6148 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6149 lock(&sched.lock)
6150 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6151 syscallWake := false
6152 next := timeSleepUntil()
6153 if next > now {
6154 sched.sysmonwait.Store(true)
6155 unlock(&sched.lock)
6156
6157
6158 sleep := forcegcperiod / 2
6159 if next-now < sleep {
6160 sleep = next - now
6161 }
6162 shouldRelax := sleep >= osRelaxMinNS
6163 if shouldRelax {
6164 osRelax(true)
6165 }
6166 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6167 if shouldRelax {
6168 osRelax(false)
6169 }
6170 lock(&sched.lock)
6171 sched.sysmonwait.Store(false)
6172 noteclear(&sched.sysmonnote)
6173 }
6174 if syscallWake {
6175 idle = 0
6176 delay = 20
6177 }
6178 }
6179 unlock(&sched.lock)
6180 }
6181
6182 lock(&sched.sysmonlock)
6183
6184
6185 now = nanotime()
6186
6187
6188 if *cgo_yield != nil {
6189 asmcgocall(*cgo_yield, nil)
6190 }
6191
6192 lastpoll := sched.lastpoll.Load()
6193 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6194 sched.lastpoll.CompareAndSwap(lastpoll, now)
6195 list, delta := netpoll(0)
6196 if !list.empty() {
6197
6198
6199
6200
6201
6202
6203
6204 incidlelocked(-1)
6205 injectglist(&list)
6206 incidlelocked(1)
6207 netpollAdjustWaiters(delta)
6208 }
6209 }
6210 if GOOS == "netbsd" && needSysmonWorkaround {
6211
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226 if next := timeSleepUntil(); next < now {
6227 startm(nil, false, false)
6228 }
6229 }
6230 if scavenger.sysmonWake.Load() != 0 {
6231
6232 scavenger.wake()
6233 }
6234
6235
6236 if retake(now) != 0 {
6237 idle = 0
6238 } else {
6239 idle++
6240 }
6241
6242 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6243 lock(&forcegc.lock)
6244 forcegc.idle.Store(false)
6245 var list gList
6246 list.push(forcegc.g)
6247 injectglist(&list)
6248 unlock(&forcegc.lock)
6249 }
6250 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6251 lasttrace = now
6252 schedtrace(debug.scheddetail > 0)
6253 }
6254 unlock(&sched.sysmonlock)
6255 }
6256 }
6257
6258 type sysmontick struct {
6259 schedtick uint32
6260 syscalltick uint32
6261 schedwhen int64
6262 syscallwhen int64
6263 }
6264
6265
6266
6267 const forcePreemptNS = 10 * 1000 * 1000
6268
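// retake preempts Gs that have held a P for longer than forcePreemptNS
// and retakes Ps that have been blocked in a syscall, handing them off
// to other Ms. It returns the number of Ps retaken from syscalls.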
6269 func retake(now int64) uint32 {
6270 n := 0
6271
6272
6273 lock(&allpLock)
6274
6275
6276
6277 for i := 0; i < len(allp); i++ {
6278 pp := allp[i]
6279 if pp == nil {
6280
6281
6282 continue
6283 }
6284 pd := &pp.sysmontick
6285 s := pp.status
6286 sysretake := false
6287 if s == _Prunning || s == _Psyscall {
6288
6289
6290
6291
6292 t := int64(pp.schedtick)
6293 if int64(pd.schedtick) != t {
6294 pd.schedtick = uint32(t)
6295 pd.schedwhen = now
6296 } else if pd.schedwhen+forcePreemptNS <= now {
6297 preemptone(pp)
6298
6299
6300 sysretake = true
6301 }
6302 }
6303 if s == _Psyscall {
6304
6305 t := int64(pp.syscalltick)
6306 if !sysretake && int64(pd.syscalltick) != t {
6307 pd.syscalltick = uint32(t)
6308 pd.syscallwhen = now
6309 continue
6310 }
6311
6312
6313
6314 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6315 continue
6316 }
6317
6318 unlock(&allpLock)
6319
6320
6321
6322
6323 incidlelocked(-1)
6324 trace := traceAcquire()
6325 if atomic.Cas(&pp.status, s, _Pidle) {
6326 if trace.ok() {
6327 trace.ProcSteal(pp, false)
6328 traceRelease(trace)
6329 }
6330 n++
6331 pp.syscalltick++
6332 handoffp(pp)
6333 } else if trace.ok() {
6334 traceRelease(trace)
6335 }
6336 incidlelocked(1)
6337 lock(&allpLock)
6338 }
6339 }
6340 unlock(&allpLock)
6341 return uint32(n)
6342 }
6343
6344
6345
6346
6347
6348
6349 func preemptall() bool {
6350 res := false
6351 for _, pp := range allp {
6352 if pp.status != _Prunning {
6353 continue
6354 }
6355 if preemptone(pp) {
6356 res = true
6357 }
6358 }
6359 return res
6360 }
6361
6362
6363
6364
6365
6366
6367
6368
6369
6370
6371
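// preemptone tells the goroutine running on pp to stop. It is purely
// best-effort: it may fail to inform the goroutine or inform the wrong
// one. It returns true if a preemption request was issued.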
6372 func preemptone(pp *p) bool {
6373 mp := pp.m.ptr()
6374 if mp == nil || mp == getg().m {
6375 return false
6376 }
6377 gp := mp.curg
6378 if gp == nil || gp == mp.g0 {
6379 return false
6380 }
6381
6382 gp.preempt = true
6383
6384
6385
6386
6387
6388 gp.stackguard0 = stackPreempt
6389
6390
6391 if preemptMSupported && debug.asyncpreemptoff == 0 {
6392 pp.preempt = true
6393 preemptM(mp)
6394 }
6395
6396 return true
6397 }
6398
6399 var starttime int64
6400
6401 func schedtrace(detailed bool) {
6402 now := nanotime()
6403 if starttime == 0 {
6404 starttime = now
6405 }
6406
6407 lock(&sched.lock)
6408 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
6409 if detailed {
6410 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6411 }
6412
6413
6414
6415 for i, pp := range allp {
6416 h := atomic.Load(&pp.runqhead)
6417 t := atomic.Load(&pp.runqtail)
6418 if detailed {
6419 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6420 mp := pp.m.ptr()
6421 if mp != nil {
6422 print(mp.id)
6423 } else {
6424 print("nil")
6425 }
6426 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6427 } else {
6428
6429
6430 print(" ")
6431 if i == 0 {
6432 print("[ ")
6433 }
6434 print(t - h)
6435 if i == len(allp)-1 {
6436 print(" ]")
6437 }
6438 }
6439 }
6440
6441 if !detailed {
6442
6443 print(" schedticks=[ ")
6444 for _, pp := range allp {
6445 print(pp.schedtick)
6446 print(" ")
6447 }
6448 print("]\n")
6449 }
6450
6451 if !detailed {
6452 unlock(&sched.lock)
6453 return
6454 }
6455
6456 for mp := allm; mp != nil; mp = mp.alllink {
6457 pp := mp.p.ptr()
6458 print(" M", mp.id, ": p=")
6459 if pp != nil {
6460 print(pp.id)
6461 } else {
6462 print("nil")
6463 }
6464 print(" curg=")
6465 if mp.curg != nil {
6466 print(mp.curg.goid)
6467 } else {
6468 print("nil")
6469 }
6470 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6471 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6472 print(lockedg.goid)
6473 } else {
6474 print("nil")
6475 }
6476 print("\n")
6477 }
6478
6479 forEachG(func(gp *g) {
6480 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6481 if gp.m != nil {
6482 print(gp.m.id)
6483 } else {
6484 print("nil")
6485 }
6486 print(" lockedm=")
6487 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6488 print(lockedm.id)
6489 } else {
6490 print("nil")
6491 }
6492 print("\n")
6493 })
6494 unlock(&sched.lock)
6495 }
6496
6497
6498
6499
6500
6501
6502 func schedEnableUser(enable bool) {
6503 lock(&sched.lock)
6504 if sched.disable.user == !enable {
6505 unlock(&sched.lock)
6506 return
6507 }
6508 sched.disable.user = !enable
6509 if enable {
6510 n := sched.disable.n
6511 sched.disable.n = 0
6512 globrunqputbatch(&sched.disable.runnable, n)
6513 unlock(&sched.lock)
6514 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6515 startm(nil, false, false)
6516 }
6517 } else {
6518 unlock(&sched.lock)
6519 }
6520 }
6521
6522
6523
6524
6525
6526 func schedEnabled(gp *g) bool {
6527 assertLockHeld(&sched.lock)
6528
6529 if sched.disable.user {
6530 return isSystemGoroutine(gp, true)
6531 }
6532 return true
6533 }
6534
6535
6536
6537
6538
6539
6540 func mput(mp *m) {
6541 assertLockHeld(&sched.lock)
6542
6543 mp.schedlink = sched.midle
6544 sched.midle.set(mp)
6545 sched.nmidle++
6546 checkdead()
6547 }
6548
6549
6550
6551
6552
6553
6554 func mget() *m {
6555 assertLockHeld(&sched.lock)
6556
6557 mp := sched.midle.ptr()
6558 if mp != nil {
6559 sched.midle = mp.schedlink
6560 sched.nmidle--
6561 }
6562 return mp
6563 }
6564
6565
6566
6567
6568
6569
6570 func globrunqput(gp *g) {
6571 assertLockHeld(&sched.lock)
6572
6573 sched.runq.pushBack(gp)
6574 sched.runqsize++
6575 }
6576
6577
6578
6579
6580
6581
6582 func globrunqputhead(gp *g) {
6583 assertLockHeld(&sched.lock)
6584
6585 sched.runq.push(gp)
6586 sched.runqsize++
6587 }
6588
6589
6590
6591
6592
6593
6594
6595 func globrunqputbatch(batch *gQueue, n int32) {
6596 assertLockHeld(&sched.lock)
6597
6598 sched.runq.pushBackAll(*batch)
6599 sched.runqsize += n
6600 *batch = gQueue{}
6601 }
6602
6603
6604
6605 func globrunqget(pp *p, max int32) *g {
6606 assertLockHeld(&sched.lock)
6607
6608 if sched.runqsize == 0 {
6609 return nil
6610 }
6611
6612 n := sched.runqsize/gomaxprocs + 1
6613 if n > sched.runqsize {
6614 n = sched.runqsize
6615 }
6616 if max > 0 && n > max {
6617 n = max
6618 }
6619 if n > int32(len(pp.runq))/2 {
6620 n = int32(len(pp.runq)) / 2
6621 }
6622
6623 sched.runqsize -= n
6624
6625 gp := sched.runq.pop()
6626 n--
6627 for ; n > 0; n-- {
6628 gp1 := sched.runq.pop()
6629 runqput(pp, gp1, false)
6630 }
6631 return gp
6632 }
6633
6634
6635 type pMask []uint32
6636
6637
6638 func (p pMask) read(id uint32) bool {
6639 word := id / 32
6640 mask := uint32(1) << (id % 32)
6641 return (atomic.Load(&p[word]) & mask) != 0
6642 }
6643
6644
6645 func (p pMask) set(id int32) {
6646 word := id / 32
6647 mask := uint32(1) << (id % 32)
6648 atomic.Or(&p[word], mask)
6649 }
6650
6651
6652 func (p pMask) clear(id int32) {
6653 word := id / 32
6654 mask := uint32(1) << (id % 32)
6655 atomic.And(&p[word], ^mask)
6656 }
6657
6658
6659
6660
6661
6662
6663
6664
6665
6666
6667
6668
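// pidleput puts pp on the _Pidle list. now must be a relatively recent
// call to nanotime or zero; it returns now, or the current time if now
// was zero. pp must have an empty run queue. sched.lock must be held.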
6669 func pidleput(pp *p, now int64) int64 {
6670 assertLockHeld(&sched.lock)
6671
6672 if !runqempty(pp) {
6673 throw("pidleput: P has non-empty run queue")
6674 }
6675 if now == 0 {
6676 now = nanotime()
6677 }
6678 if pp.timers.len.Load() == 0 {
6679 timerpMask.clear(pp.id)
6680 }
6681 idlepMask.set(pp.id)
6682 pp.link = sched.pidle
6683 sched.pidle.set(pp)
6684 sched.npidle.Add(1)
6685 if !pp.limiterEvent.start(limiterEventIdle, now) {
6686 throw("must be able to track idle limiter event")
6687 }
6688 return now
6689 }
6690
6691
6692
6693
6694
6695
6696
6697
6698 func pidleget(now int64) (*p, int64) {
6699 assertLockHeld(&sched.lock)
6700
6701 pp := sched.pidle.ptr()
6702 if pp != nil {
6703
6704 if now == 0 {
6705 now = nanotime()
6706 }
6707 timerpMask.set(pp.id)
6708 idlepMask.clear(pp.id)
6709 sched.pidle = pp.link
6710 sched.npidle.Add(-1)
6711 pp.limiterEvent.stop(limiterEventIdle, now)
6712 }
6713 return pp, now
6714 }
6715
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726 func pidlegetSpinning(now int64) (*p, int64) {
6727 assertLockHeld(&sched.lock)
6728
6729 pp, now := pidleget(now)
6730 if pp == nil {
6731
6732
6733
6734 sched.needspinning.Store(1)
6735 return nil, now
6736 }
6737
6738 return pp, now
6739 }
6740
6741
6742
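// runqempty reports whether pp has no Gs on its local run queue. It
// never returns true spuriously.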
6743 func runqempty(pp *p) bool {
6744
6745
6746
6747
6748 for {
6749 head := atomic.Load(&pp.runqhead)
6750 tail := atomic.Load(&pp.runqtail)
6751 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6752 if tail == atomic.Load(&pp.runqtail) {
6753 return head == tail && runnext == 0
6754 }
6755 }
6756 }
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767 const randomizeScheduler = raceenabled
6768
6769
6770
6771
6772
6773
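// runqput tries to put g on the local runnable queue. If next is true
// it puts g in the pp.runnext slot; otherwise it adds g to the tail of
// the queue. If the local queue is full, half of it (plus g) is moved
// to the global queue. Executed only by the owner P.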
6774 func runqput(pp *p, gp *g, next bool) {
6775 if !haveSysmon && next {
6776
6777
6778
6779
6780
6781
6782
6783
6784 next = false
6785 }
6786 if randomizeScheduler && next && randn(2) == 0 {
6787 next = false
6788 }
6789
6790 if next {
6791 retryNext:
6792 oldnext := pp.runnext
6793 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6794 goto retryNext
6795 }
6796 if oldnext == 0 {
6797 return
6798 }
6799 // Kick the old runnext out to the regular run queue.
6800 gp = oldnext.ptr()
6801 }
6802
6803 retry:
6804 h := atomic.LoadAcq(&pp.runqhead)
6805 t := pp.runqtail
6806 if t-h < uint32(len(pp.runq)) {
6807 pp.runq[t%uint32(len(pp.runq))].set(gp)
6808 atomic.StoreRel(&pp.runqtail, t+1)
6809 return
6810 }
6811 if runqputslow(pp, gp, h, t) {
6812 return
6813 }
6814 // The queue is not full, now the put above must succeed.
6815 goto retry
6816 }
6817
6818 // Put g and a batch of work from local runnable queue on global queue.
6819 // Executed only by the owner P.
6820 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6821 var batch [len(pp.runq)/2 + 1]*g
6822
6823 // First, grab a batch from local queue.
6824 n := t - h
6825 n = n / 2
6826 if n != uint32(len(pp.runq)/2) {
6827 throw("runqputslow: queue is not full")
6828 }
6829 for i := uint32(0); i < n; i++ {
6830 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6831 }
6832 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6833 return false
6834 }
6835 batch[n] = gp
6836
6837 if randomizeScheduler {
6838 for i := uint32(1); i <= n; i++ {
6839 j := cheaprandn(i + 1)
6840 batch[i], batch[j] = batch[j], batch[i]
6841 }
6842 }
6843
6844 // Link the goroutines.
6845 for i := uint32(0); i < n; i++ {
6846 batch[i].schedlink.set(batch[i+1])
6847 }
6848 var q gQueue
6849 q.head.set(batch[0])
6850 q.tail.set(batch[n])
6851
6852 // Now put the batch on global queue.
6853 lock(&sched.lock)
6854 globrunqputbatch(&q, int32(n+1))
6855 unlock(&sched.lock)
6856 return true
6857 }
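// Note (illustrative, not part of proc.go): runqputslow only runs when the
// local queue is full. With the 256-entry local run queue, t-h = 256, so
// n = 128 and the consistency check n == len(pp.runq)/2 holds; the batch then
// carries those 128 Gs plus gp itself, and globrunqputbatch(&q, 129) moves
// half of the local work, plus the new g, to the global queue in one step.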
6858
6859 // runqputbatch tries to put all the G's on q on the local runnable queue.
6860 // If the queue is full, they are put on the global queue; in that case
6861 // this will temporarily acquire the scheduler lock.
6862 // Executed only by the owner P.
6863 func runqputbatch(pp *p, q *gQueue, qsize int) {
6864 h := atomic.LoadAcq(&pp.runqhead)
6865 t := pp.runqtail
6866 n := uint32(0)
6867 for !q.empty() && t-h < uint32(len(pp.runq)) {
6868 gp := q.pop()
6869 pp.runq[t%uint32(len(pp.runq))].set(gp)
6870 t++
6871 n++
6872 }
6873 qsize -= int(n)
6874
6875 if randomizeScheduler {
6876 off := func(o uint32) uint32 {
6877 return (pp.runqtail + o) % uint32(len(pp.runq))
6878 }
6879 for i := uint32(1); i < n; i++ {
6880 j := cheaprandn(i + 1)
6881 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6882 }
6883 }
6884
6885 atomic.StoreRel(&pp.runqtail, t)
6886 if !q.empty() {
6887 lock(&sched.lock)
6888 globrunqputbatch(q, int32(qsize))
6889 unlock(&sched.lock)
6890 }
6891 }
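// Note (illustrative, not part of proc.go): a worked example of the split in
// runqputbatch. If the local queue already holds 250 of its 256 slots and q
// holds 10 Gs, the loop admits 6 of them (t-h runs from 250 to 255), n = 6,
// qsize drops from 10 to 4, and the remaining 4 Gs are handed to
// globrunqputbatch under sched.lock.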
6892
6893 // Get g from local runnable queue.
6894 // If inheritTime is true, gp should inherit the remaining time in the
6895 // current time slice. Otherwise, it should start a new time slice.
6896 // Executed only by the owner P.
6897 func runqget(pp *p) (gp *g, inheritTime bool) {
6898 // If there's a runnext, it's the next G to run.
6899 next := pp.runnext
6900 // If the runnext cas fails, it could only have been stolen by another P,
6901 // because other Ps can CAS it to zero only. Hence, there's no need to retry
6902 // this CAS if it fails.
6903 if next != 0 && pp.runnext.cas(next, 0) {
6904 return next.ptr(), true
6905 }
6906
6907 for {
6908 h := atomic.LoadAcq(&pp.runqhead)
6909 t := pp.runqtail
6910 if t == h {
6911 return nil, false
6912 }
6913 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6914 if atomic.CasRel(&pp.runqhead, h, h+1) {
6915 return gp, false
6916 }
6917 }
6918 }
6919
6920 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6921 // Executed only by the owner P.
6922 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6923 oldNext := pp.runnext
6924 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6925 drainQ.pushBack(oldNext.ptr())
6926 n++
6927 }
6928
6929 retry:
6930 h := atomic.LoadAcq(&pp.runqhead)
6931 t := pp.runqtail
6932 qn := t - h
6933 if qn == 0 {
6934 return
6935 }
6936 if qn > uint32(len(pp.runq)) {
6937 goto retry
6938 }
6939
6940 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6941 goto retry
6942 }
6943
6944 // Note the inverted order compared to runqget/runqgrab: the CAS above
6945 // advances runqhead first, claiming all qn entries, and only then are the
6946 // goroutines read out of pp.runq. This is safe because concurrent stealers
6947 // only look at entries at or after the new head, and runqdrain is executed
6948 // only by the owner P, so nothing can overwrite the claimed slots before
6949 // they are copied into drainQ. The whole queue is therefore drained with a
6950 // single CAS, no matter how many goroutines it holds.
6951 for i := uint32(0); i < qn; i++ {
6952 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6953 drainQ.pushBack(gp)
6954 n++
6955 }
6956 return
6957 }
6958
6959 // Grabs a batch of goroutines from pp's runnable queue into batch.
6960 // Batch is a ring buffer starting at batchHead.
6961 // Returns number of grabbed goroutines.
6962 // Can be executed by any P.
6963 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6964 for {
6965 h := atomic.LoadAcq(&pp.runqhead)
6966 t := atomic.LoadAcq(&pp.runqtail)
6967 n := t - h
6968 n = n - n/2
6969 if n == 0 {
6970 if stealRunNextG {
6971 // Try to steal from pp.runnext.
6972 if next := pp.runnext; next != 0 {
6973 if pp.status == _Prunning {
6974 // Sleep to ensure that pp isn't about to run the g
6975 // we are about to steal.
6976 // The important use case here is when the g running
6977 // on pp ready()s another g and then almost
6978 // immediately blocks. Instead of stealing runnext
6979 // in this window, back off to give pp a chance to
6980 // schedule runnext. This will avoid thrashing gs
6981 // between different Ps.
6982 // A sync chan send/recv takes ~50ns as of time of
6983 // writing, so 3us gives ~50x overshoot.
6984 if !osHasLowResTimer {
6985 usleep(3)
6986 } else {
6987 // On some platforms system timer granularity is
6988 // 1-15ms, which is way too much for this
6989 // optimization. So just yield.
6990 osyield()
6991 }
6992 }
6993 if !pp.runnext.cas(next, 0) {
6994 continue
6995 }
6996 batch[batchHead%uint32(len(batch))] = next
6997 return 1
6998 }
6999 }
7000 return 0
7001 }
7002 if n > uint32(len(pp.runq)/2) {
7003 continue
7004 }
7005 for i := uint32(0); i < n; i++ {
7006 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7007 batch[(batchHead+i)%uint32(len(batch))] = g
7008 }
7009 if atomic.CasRel(&pp.runqhead, h, h+n) {
7010 return n
7011 }
7012 }
7013 }
7014
7015 // Steal half of elements from local runnable queue of p2
7016 // and put onto local runnable queue of p.
7017 // Returns one of the stolen elements (or nil if failed).
7018 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7019 t := pp.runqtail
7020 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7021 if n == 0 {
7022 return nil
7023 }
7024 n--
7025 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7026 if n == 0 {
7027 return gp
7028 }
7029 h := atomic.LoadAcq(&pp.runqhead)
7030 if t-h+n >= uint32(len(pp.runq)) {
7031 throw("runqsteal: runq overflow")
7032 }
7033 atomic.StoreRel(&pp.runqtail, t+n)
7034 return gp
7035 }
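// Note (illustrative, not part of proc.go): the steal amount in runqgrab is
// n = n - n/2, i.e. the larger half. If the victim p2 has 7 queued Gs,
// runqgrab claims 4 of them; runqsteal then returns one to run immediately
// and advances pp.runqtail by the remaining 3, leaving the victim with 3.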
7036
7037 // A gQueue is a deque of Gs linked through g.schedlink. A G can only be
7038 // on one gQueue or gList at a time.
7039 type gQueue struct {
7040 head guintptr
7041 tail guintptr
7042 }
7043
7044 // empty reports whether q is empty.
7045 func (q *gQueue) empty() bool {
7046 return q.head == 0
7047 }
7048
7049 // push adds gp to the head of q.
7050 func (q *gQueue) push(gp *g) {
7051 gp.schedlink = q.head
7052 q.head.set(gp)
7053 if q.tail == 0 {
7054 q.tail.set(gp)
7055 }
7056 }
7057
7058 // pushBack adds gp to the tail of q.
7059 func (q *gQueue) pushBack(gp *g) {
7060 gp.schedlink = 0
7061 if q.tail != 0 {
7062 q.tail.ptr().schedlink.set(gp)
7063 } else {
7064 q.head.set(gp)
7065 }
7066 q.tail.set(gp)
7067 }
7068
7069 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
7070 // not be used.
7071 func (q *gQueue) pushBackAll(q2 gQueue) {
7072 if q2.tail == 0 {
7073 return
7074 }
7075 q2.tail.ptr().schedlink = 0
7076 if q.tail != 0 {
7077 q.tail.ptr().schedlink = q2.head
7078 } else {
7079 q.head = q2.head
7080 }
7081 q.tail = q2.tail
7082 }
7083
7084 // pop removes and returns the head of queue q. It returns nil if
7085 // q is empty.
7086 func (q *gQueue) pop() *g {
7087 gp := q.head.ptr()
7088 if gp != nil {
7089 q.head = gp.schedlink
7090 if q.head == 0 {
7091 q.tail = 0
7092 }
7093 }
7094 return gp
7095 }
7096
7097 // popList takes all Gs in q and returns them as a gList.
7098 func (q *gQueue) popList() gList {
7099 stack := gList{q.head}
7100 *q = gQueue{}
7101 return stack
7102 }
7103
7104 // A gList is a list of Gs linked through g.schedlink. A G can only be
7105 // on one gQueue or gList at a time.
7106 type gList struct {
7107 head guintptr
7108 }
7109
7110 // empty reports whether l is empty.
7111 func (l *gList) empty() bool {
7112 return l.head == 0
7113 }
7114
7115 // push adds gp to the head of l.
7116 func (l *gList) push(gp *g) {
7117 gp.schedlink = l.head
7118 l.head.set(gp)
7119 }
7120
7121 // pushAll prepends all Gs in q to l. After this q must not be used.
7122 func (l *gList) pushAll(q gQueue) {
7123 if !q.empty() {
7124 q.tail.ptr().schedlink = l.head
7125 l.head = q.head
7126 }
7127 }
7128
7129 // pop removes and returns the head of l. If l is empty, it returns nil.
7130 func (l *gList) pop() *g {
7131 gp := l.head.ptr()
7132 if gp != nil {
7133 l.head = gp.schedlink
7134 }
7135 return gp
7136 }
7137
7138 //go:linkname setMaxThreads runtime/debug.setMaxThreads
7139 func setMaxThreads(in int) (out int) {
7140 lock(&sched.lock)
7141 out = int(sched.maxmcount)
7142 if in > 0x7fffffff {
7143 sched.maxmcount = 0x7fffffff
7144 } else {
7145 sched.maxmcount = int32(in)
7146 }
7147 checkmcount()
7148 unlock(&sched.lock)
7149 return
7150 }
7151
7152 // procPin should be an internal detail,
7153 // but widely used packages access it using linkname.
7154 // Notable members of the hall of shame include:
7155 //   - github.com/bytedance/gopkg
7156 //   - github.com/choleraehyq/pid
7157 //   - github.com/songzhibin97/gkit
7158 //
7159 // Do not remove or change the type signature.
7160 // See go.dev/issue/67401.
7161 //
7162 //go:linkname procPin
7163 //go:nosplit
7164 func procPin() int {
7165 gp := getg()
7166 mp := gp.m
7167
7168 mp.locks++
7169 return int(mp.p.ptr().id)
7170 }
7171
7172 // procUnpin should be an internal detail,
7173 // but widely used packages access it using linkname.
7174 // Notable members of the hall of shame include:
7175 //   - github.com/bytedance/gopkg
7176 //   - github.com/choleraehyq/pid
7177 //   - github.com/songzhibin97/gkit
7178 //
7179 // Do not remove or change the type signature.
7180 // See go.dev/issue/67401.
7181 //
7182 //go:linkname procUnpin
7183 //go:nosplit
7184 func procUnpin() {
7185 gp := getg()
7186 gp.m.locks--
7187 }
7188
7189 //go:linkname sync_runtime_procPin sync.runtime_procPin
7190 //go:nosplit
7191 func sync_runtime_procPin() int {
7192 return procPin()
7193 }
7194
7195 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
7196 //go:nosplit
7197 func sync_runtime_procUnpin() {
7198 procUnpin()
7199 }
7200
7201 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
7202 //go:nosplit
7203 func sync_atomic_runtime_procPin() int {
7204 return procPin()
7205 }
7206
7207 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
7208 //go:nosplit
7209 func sync_atomic_runtime_procUnpin() {
7210 procUnpin()
7211 }
7212
7213 // Active spinning for sync.Mutex.
7214 //
7215 //go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin
7216 //go:nosplit
7217 func internal_sync_runtime_canSpin(i int) bool {
7218 // sync.Mutex is cooperative, so we are conservative with spinning.
7219 // Spin only few times and only if running on a multicore machine and
7220 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
7221 // As opposed to runtime mutex we don't do passive spinning here,
7222 // because there can be work on global runq or on other Ps.
7223 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7224 return false
7225 }
7226 if p := getg().m.p.ptr(); !runqempty(p) {
7227 return false
7228 }
7229 return true
7230 }
7231
7232 //go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin
7233 //go:nosplit
7234 func internal_sync_runtime_doSpin() {
7235 procyield(active_spin_cnt)
7236 }
7237
7238 // Active spinning for sync.Mutex.
7239 //
7240 // sync_runtime_canSpin should be an internal detail,
7241 // but widely used packages access it using linkname.
7242 // Notable members of the hall of shame include:
7243 //   - github.com/livekit/protocol
7244 //   - github.com/sagernet/gvisor
7245 //   - gvisor.dev/gvisor
7246 //
7247 // Do not remove or change the type signature.
7248 // See go.dev/issue/67401.
7249 //
7250 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
7251 //go:nosplit
7252 func sync_runtime_canSpin(i int) bool {
7253 return internal_sync_runtime_canSpin(i)
7254 }
7255
7256 // sync_runtime_doSpin should be an internal detail,
7257 // but widely used packages access it using linkname.
7258 // Notable members of the hall of shame include:
7259 //   - github.com/livekit/protocol
7260 //   - github.com/sagernet/gvisor
7261 //   - gvisor.dev/gvisor
7262 //
7263 // Do not remove or change the type signature.
7264 // See go.dev/issue/67401.
7265 //
7266 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
7267 //go:nosplit
7268 func sync_runtime_doSpin() {
7269 internal_sync_runtime_doSpin()
7270 }
7271
7272 var stealOrder randomOrder
7273
7274 // randomOrder/randomEnum are helper types for randomized work stealing.
7275 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
7276 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7277 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
7278 type randomOrder struct {
7279 count uint32
7280 coprimes []uint32
7281 }
7282
7283 type randomEnum struct {
7284 i uint32
7285 count uint32
7286 pos uint32
7287 inc uint32
7288 }
7289
7290 func (ord *randomOrder) reset(count uint32) {
7291 ord.count = count
7292 ord.coprimes = ord.coprimes[:0]
7293 for i := uint32(1); i <= count; i++ {
7294 if gcd(i, count) == 1 {
7295 ord.coprimes = append(ord.coprimes, i)
7296 }
7297 }
7298 }
7299
7300 func (ord *randomOrder) start(i uint32) randomEnum {
7301 return randomEnum{
7302 count: ord.count,
7303 pos: i % ord.count,
7304 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7305 }
7306 }
7307
7308 func (enum *randomEnum) done() bool {
7309 return enum.i == enum.count
7310 }
7311
7312 func (enum *randomEnum) next() {
7313 enum.i++
7314 enum.pos = (enum.pos + enum.inc) % enum.count
7315 }
7316
7317 func (enum *randomEnum) position() uint32 {
7318 return enum.pos
7319 }
7320
7321 func gcd(a, b uint32) uint32 {
7322 for b != 0 {
7323 a, b = b, a%b
7324 }
7325 return a
7326 }
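// Note (illustrative, not part of proc.go): the coprime-stride walk used by
// randomOrder visits every P exactly once. A minimal sketch, assuming
// gomaxprocs = 4 (reset(4) leaves coprimes = {1, 3}) and caller seed i = 5,
// which start(5) turns into pos = 5%4 = 1 and inc = coprimes[(5/4)%2] = 3:
//
//	pos, inc := uint32(1), uint32(3)
//	for n := 0; n < 4; n++ {
//		println(pos) // prints 1, 0, 3, 2: each P id once, in pseudo-random order
//		pos = (pos + inc) % 4
//	}
//
// Because inc is coprime with count, (pos + k*inc) % count cycles through all
// residues before repeating, which is exactly the property stealing relies on.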
7327
7328 // An initTask represents the set of initializations that need to be done
7329 // for a package: a header followed by nfns init function PCs.
7330 type initTask struct {
7331 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7332 nfns uint32
7333 // followed by nfns pcs, uintptr sized, one per init function to run
7334 }
7335
7336 // inittrace stores statistics for init functions which are
7337 // updated by malloc and newproc when active is true.
7338 var inittrace tracestat
7339
7340 type tracestat struct {
7341 active bool // init tracing activation status
7342 id uint64 // init goroutine id
7343 allocs uint64 // heap allocations
7344 bytes uint64 // heap allocated bytes
7345 }
7346
7347 func doInit(ts []*initTask) {
7348 for _, t := range ts {
7349 doInit1(t)
7350 }
7351 }
7352
7353 func doInit1(t *initTask) {
7354 switch t.state {
7355 case 2:
7356 return
7357 case 1:
7358 throw("recursive call during initialization - linker skew")
7359 default:
7360 t.state = 1
7361
7362 var (
7363 start int64
7364 before tracestat
7365 )
7366
7367 if inittrace.active {
7368 start = nanotime()
7369 // Load stats non-atomically since inittrace is updated only by this init goroutine.
7370 before = inittrace
7371 }
7372
7373 if t.nfns == 0 {
7374 // We should have pruned all of these in the linker.
7375 throw("inittask with no functions")
7376 }
7377
7378 firstFunc := add(unsafe.Pointer(t), 8)
7379 for i := uint32(0); i < t.nfns; i++ {
7380 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7381 f := *(*func())(unsafe.Pointer(&p))
7382 f()
7383 }
7384
7385 if inittrace.active {
7386 end := nanotime()
7387 // Load stats non-atomically since inittrace is updated only by this init goroutine.
7388 after := inittrace
7389
7390 f := *(*func())(unsafe.Pointer(&firstFunc))
7391 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7392
7393 var sbuf [24]byte
7394 print("init ", pkg, " @")
7395 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7396 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7397 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7398 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7399 print("\n")
7400 }
7401
7402 t.state = 2
7403 }
7404 }
7405