Source file
src/runtime/proc.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goexperiment"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/exithook"
15 "internal/runtime/sys"
16 "internal/strconv"
17 "internal/stringslite"
18 "unsafe"
19 )
20
21
22 var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

118 var (
119 m0 m
120 g0 g
121 mcache0 *mcache
122 raceprocctx0 uintptr
123 raceFiniLock mutex
124 )
125
126
127
128 var runtime_inittasks []*initTask
129
130
131
132
133
134 var main_init_done chan bool
135
//go:linkname main_main main.main
137 func main_main()
138
// mainStarted indicates that the main M has started.
140 var mainStarted bool
141
// runtimeInitTime is the nanotime() time at which the runtime started.
143 var runtimeInitTime int64
144
// Value to use for signal mask for newly created M's.
146 var initSigmask sigset
147
// The main goroutine.
149 func main() {
150 mp := getg().m
151
// Racectx of m0->g0 is used only as the parent of the main goroutine.
// It must not be used for anything else.
154 mp.g0.racectx = 0
155
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
159 if goarch.PtrSize == 8 {
160 maxstacksize = 1000000000
161 } else {
162 maxstacksize = 250000000
163 }

// An upper limit for max stack size. Used to avoid random crashes
// after calling SetMaxStack and trying to allocate a stack that is too big,
// since stackalloc works with 32-bit sizes.
168 maxstackceiling = 2 * maxstacksize
169
170
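// Allow newproc to start new Ms.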
171 mainStarted = true
172
173 if haveSysmon {
174 systemstack(func() {
175 newm(sysmon, nil, -1)
176 })
177 }
178
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
// to preserve the lock.
185 lockOSThread()
186
187 if mp != &m0 {
188 throw("runtime.main not on m0")
189 }
190
// Record when the world started.
193 runtimeInitTime = nanotime()
194 if runtimeInitTime == 0 {
195 throw("nanotime returning zero")
196 }
197
198 if debug.inittrace != 0 {
199 inittrace.id = getg().goid
200 inittrace.active = true
201 }
202
203 doInit(runtime_inittasks)
204
// Defer unlock so that runtime.Goexit during init does the unlock too.
206 needUnlock := true
207 defer func() {
208 if needUnlock {
209 unlockOSThread()
210 }
211 }()
212
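// Enable the garbage collector: kick off the background sweeper and
// scavenger goroutines now that initialization is done.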
213 gcenable()
214 defaultGOMAXPROCSUpdateEnable()
215
216 main_init_done = make(chan bool)
217 if iscgo {
218 if _cgo_pthread_key_created == nil {
219 throw("_cgo_pthread_key_created missing")
220 }
221
222 if _cgo_thread_start == nil {
223 throw("_cgo_thread_start missing")
224 }
225 if GOOS != "windows" {
226 if _cgo_setenv == nil {
227 throw("_cgo_setenv missing")
228 }
229 if _cgo_unsetenv == nil {
230 throw("_cgo_unsetenv missing")
231 }
232 }
233 if _cgo_notify_runtime_init_done == nil {
234 throw("_cgo_notify_runtime_init_done missing")
235 }
236
237
238 if set_crosscall2 == nil {
239 throw("set_crosscall2 missing")
240 }
241 set_crosscall2()
242
243
244
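// Start the template thread in case we enter Go from
// a C-created thread and need to create a new thread.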
245 startTemplateThread()
246 cgocall(_cgo_notify_runtime_init_done, nil)
247 }
248
249
250
251
252
253
254
255
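// Run the init tasks of each module in the module list, stopping at the
// module that was last in the list when the loop started; modules loaded
// later (for example via plugin) run their own init tasks separately.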
256 last := lastmoduledatap
257 for m := &firstmoduledata; true; m = m.next {
258 doInit(m.inittasks)
259 if m == last {
260 break
261 }
262 }
263
264
265
266 inittrace.active = false
267
268 close(main_init_done)
269
270 needUnlock = false
271 unlockOSThread()
272
273 if isarchive || islibrary {
274
275
276 if GOARCH == "wasm" {
277
278
279
280
281
282
283
284 pause(sys.GetCallerSP() - 16)
285 panic("unreachable")
286 }
287 return
288 }
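// Make an indirect call to main.main: the linker doesn't know the address
// of the main package when laying down the runtime.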
289 fn := main_main
290 fn()
291
292
293
294
295
296
297
298
299 exitHooksRun := false
300 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
301 runExitHooks(0)
302 exitHooksRun = true
303 lsandoleakcheck()
304 }
305
306
307
308
309
310 if runningPanicDefers.Load() != 0 {
311
312 for c := 0; c < 1000; c++ {
313 if runningPanicDefers.Load() == 0 {
314 break
315 }
316 Gosched()
317 }
318 }
319 if panicking.Load() != 0 {
320 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
321 }
322 if !exitHooksRun {
323 runExitHooks(0)
324 }
325 if raceenabled {
326 racefini()
327 }
328
329 exit(0)
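// exit should never return; if it somehow does, crash hard with a nil write.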
330 for {
331 var x *int32
332 *x = 0
333 }
334 }
335
// os_beforeExit is called from os.Exit(0).
//
//go:linkname os_beforeExit os.runtime_beforeExit
339 func os_beforeExit(exitCode int) {
340 runExitHooks(exitCode)
341 if exitCode == 0 && raceenabled {
342 racefini()
343 }
344
345
346 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
347 lsandoleakcheck()
348 }
349 }
350
351 func init() {
352 exithook.Gosched = Gosched
353 exithook.Goid = func() uint64 { return getg().goid }
354 exithook.Throw = throw
355 }
356
357 func runExitHooks(code int) {
358 exithook.Run(code)
359 }
360
// start forcegc helper goroutine
362 func init() {
363 go forcegchelper()
364 }
365
366 func forcegchelper() {
367 forcegc.g = getg()
368 lockInit(&forcegc.lock, lockRankForcegc)
369 for {
370 lock(&forcegc.lock)
371 if forcegc.idle.Load() {
372 throw("forcegc: phase error")
373 }
374 forcegc.idle.Store(true)
375 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
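// this goroutine is explicitly resumed by sysmon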
376
377 if debug.gctrace > 0 {
378 println("GC forced")
379 }
380
381 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
382 }
383 }
384
385
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
389 func Gosched() {
390 checkTimeouts()
391 mcall(gosched_m)
392 }
393
394
// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
398 func goschedguarded() {
399 mcall(goschedguarded_m)
400 }
401
402
// goschedIfBusy yields the processor like gosched, but only does so if
// there are no idle Ps or if we're on the only P and there's nothing in
// the runq. In both cases, there is freely available idle time.
407 func goschedIfBusy() {
408 gp := getg()
409
410
411 if !gp.preempt && sched.npidle.Load() > 0 {
412 return
413 }
414 mcall(gosched_m)
415 }
416
417
// Puts the current goroutine into a waiting state and calls unlockf on the
// system stack.
//
// If unlockf returns false, the goroutine is resumed.
//
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
//
// Note that because unlockf is called after putting the G into a waiting
// state, the G may have already been readied by the time unlockf is called
// unless there is external synchronization preventing the G from being
// readied. If unlockf returns false, it must guarantee that the G cannot be
// externally readied.
//
// Reason explains why the goroutine has been parked. It is displayed in stack
// traces and heap dumps. Reasons should be unique and descriptive. Do not
// re-use reasons, add new ones.
445 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
446 if reason != waitReasonSleep {
447 checkTimeouts()
448 }
449 mp := acquirem()
450 gp := mp.curg
451 status := readgstatus(gp)
452 if status != _Grunning && status != _Gscanrunning {
453 throw("gopark: bad g status")
454 }
455 mp.waitlock = lock
456 mp.waitunlockf = unlockf
457 gp.waitreason = reason
458 mp.waitTraceBlockReason = traceReason
459 mp.waitTraceSkip = traceskip
460 releasem(mp)
461
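// can't do anything that might move the G between Ms here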
462 mcall(park_m)
463 }
464
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
467 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
468 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
469 }
470
471
472
473
474
475
476
477
478
479
480
481 func goready(gp *g, traceskip int) {
482 systemstack(func() {
483 ready(gp, traceskip, true)
484 })
485 }
486
487
488 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls
// acquireSudog, acquireSudog calls new(sudog),
// new calls malloc, malloc can call the garbage collector,
// and the garbage collector calls the semaphore implementation
// in stopTheWorld.
// Break the cycle by doing acquirem/releasem around new(sudog).
// The acquirem/releasem increments m.locks during new(sudog),
// which keeps the garbage collector from being invoked.
497 mp := acquirem()
498 pp := mp.p.ptr()
499 if len(pp.sudogcache) == 0 {
500 lock(&sched.sudoglock)
// First, try to grab a batch from the central cache.
502 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
503 s := sched.sudogcache
504 sched.sudogcache = s.next
505 s.next = nil
506 pp.sudogcache = append(pp.sudogcache, s)
507 }
508 unlock(&sched.sudoglock)
// If the central cache is empty, allocate a new one.
510 if len(pp.sudogcache) == 0 {
511 pp.sudogcache = append(pp.sudogcache, new(sudog))
512 }
513 }
514 n := len(pp.sudogcache)
515 s := pp.sudogcache[n-1]
516 pp.sudogcache[n-1] = nil
517 pp.sudogcache = pp.sudogcache[:n-1]
518 if s.elem.get() != nil {
519 throw("acquireSudog: found s.elem != nil in cache")
520 }
521 releasem(mp)
522 return s
523 }
524
525
526 func releaseSudog(s *sudog) {
527 if s.elem.get() != nil {
528 throw("runtime: sudog with non-nil elem")
529 }
530 if s.isSelect {
531 throw("runtime: sudog with non-false isSelect")
532 }
533 if s.next != nil {
534 throw("runtime: sudog with non-nil next")
535 }
536 if s.prev != nil {
537 throw("runtime: sudog with non-nil prev")
538 }
539 if s.waitlink != nil {
540 throw("runtime: sudog with non-nil waitlink")
541 }
542 if s.c.get() != nil {
543 throw("runtime: sudog with non-nil c")
544 }
545 gp := getg()
546 if gp.param != nil {
547 throw("runtime: releaseSudog with non-nil gp.param")
548 }
549 mp := acquirem()
550 pp := mp.p.ptr()
551 if len(pp.sudogcache) == cap(pp.sudogcache) {
// Transfer half of the local cache to the central cache.
553 var first, last *sudog
554 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
555 n := len(pp.sudogcache)
556 p := pp.sudogcache[n-1]
557 pp.sudogcache[n-1] = nil
558 pp.sudogcache = pp.sudogcache[:n-1]
559 if first == nil {
560 first = p
561 } else {
562 last.next = p
563 }
564 last = p
565 }
566 lock(&sched.sudoglock)
567 last.next = sched.sudogcache
568 sched.sudogcache = first
569 unlock(&sched.sudoglock)
570 }
571 pp.sudogcache = append(pp.sudogcache, s)
572 releasem(mp)
573 }
574
575
576 func badmcall(fn func(*g)) {
577 throw("runtime: mcall called on m->g0 stack")
578 }
579
580 func badmcall2(fn func(*g)) {
581 throw("runtime: mcall function returned")
582 }
583
584 func badreflectcall() {
585 panic(plainError("arg size to reflect.call more than 1GB"))
586 }
587
588
589
590 func badmorestackg0() {
591 if !crashStackImplemented {
592 writeErrStr("fatal: morestack on g0\n")
593 return
594 }
595
596 g := getg()
597 switchToCrashStack(func() {
598 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
599 g.m.traceback = 2
600 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
601 print("\n")
602
603 throw("morestack on g0")
604 })
605 }
606
607
608
609 func badmorestackgsignal() {
610 writeErrStr("fatal: morestack on gsignal\n")
611 }
612
613
614 func badctxt() {
615 throw("ctxt != 0")
616 }
617
618
619
620 var gcrash g
621
622 var crashingG atomic.Pointer[g]
623
624
625
626
627
628
629
630
631
632 func switchToCrashStack(fn func()) {
633 me := getg()
634 if crashingG.CompareAndSwapNoWB(nil, me) {
635 switchToCrashStack0(fn)
636 abort()
637 }
638 if crashingG.Load() == me {
639
640 writeErrStr("fatal: recursive switchToCrashStack\n")
641 abort()
642 }
643
644 usleep_no_g(100)
645 writeErrStr("fatal: concurrent switchToCrashStack\n")
646 abort()
647 }
648
649
650
651
652 const crashStackImplemented = GOOS != "windows"
653
654
655 func switchToCrashStack0(fn func())
656
657 func lockedOSThread() bool {
658 gp := getg()
659 return gp.lockedm != 0 && gp.m.lockedg != 0
660 }
661
662 var (
663
664
665
666
667
668
669 allglock mutex
670 allgs []*g
671
672
673
674
675
676
677
678
679
680
681
682
683
684 allglen uintptr
685 allgptr **g
686 )
687
688 func allgadd(gp *g) {
689 if readgstatus(gp) == _Gidle {
690 throw("allgadd: bad status Gidle")
691 }
692
693 lock(&allglock)
694 allgs = append(allgs, gp)
695 if &allgs[0] != allgptr {
696 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
697 }
698 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
699 unlock(&allglock)
700 }
701
702
703
704
705 func allGsSnapshot() []*g {
706 assertWorldStoppedOrLockHeld(&allglock)
707
708
709
710
711
712
713 return allgs[:len(allgs):len(allgs)]
714 }
715
716
717 func atomicAllG() (**g, uintptr) {
718 length := atomic.Loaduintptr(&allglen)
719 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
720 return ptr, length
721 }
722
723
724 func atomicAllGIndex(ptr **g, i uintptr) *g {
725 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
726 }
727
728
729
730
731 func forEachG(fn func(gp *g)) {
732 lock(&allglock)
733 for _, gp := range allgs {
734 fn(gp)
735 }
736 unlock(&allglock)
737 }
738
739
740
741
742
743 func forEachGRace(fn func(gp *g)) {
744 ptr, length := atomicAllG()
745 for i := uintptr(0); i < length; i++ {
746 gp := atomicAllGIndex(ptr, i)
747 fn(gp)
748 }
749 return
750 }
751
752 const (
// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's mostly arbitrary.
755 _GoidCacheBatch = 16
756 )
757
// cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be
// the complete value of the GODEBUG environment variable.
760 func cpuinit(env string) {
761 cpu.Initialize(env)
762
763
764
765 switch GOARCH {
766 case "386", "amd64":
767 x86HasAVX = cpu.X86.HasAVX
768 x86HasFMA = cpu.X86.HasFMA
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771
772 case "arm":
773 armHasVFPv4 = cpu.ARM.HasVFPv4
774
775 case "arm64":
776 arm64HasATOMICS = cpu.ARM64.HasATOMICS
777
778 case "loong64":
779 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
780 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
781 loong64HasLSX = cpu.Loong64.HasLSX
782
783 case "riscv64":
784 riscv64HasZbb = cpu.RISCV64.HasZbb
785 }
786 }
787
// getGodebugEarly extracts the GODEBUG environment variable before much of the
// runtime has been initialized, by scanning the environment strings laid out
// after argv on Unix-like operating systems. The second result reports whether
// the value could be determined this early; on other platforms it is false.
793 func getGodebugEarly() (string, bool) {
794 const prefix = "GODEBUG="
795 var env string
796 switch GOOS {
797 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
798
799
800
801 n := int32(0)
802 for argv_index(argv, argc+1+n) != nil {
803 n++
804 }
805
806 for i := int32(0); i < n; i++ {
807 p := argv_index(argv, argc+1+i)
808 s := unsafe.String(p, findnull(p))
809
810 if stringslite.HasPrefix(s, prefix) {
811 env = gostringnocopy(p)[len(prefix):]
812 break
813 }
814 }
815 break
816
817 default:
818 return "", false
819 }
820 return env, true
821 }
822
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
831 func schedinit() {
832 lockInit(&sched.lock, lockRankSched)
833 lockInit(&sched.sysmonlock, lockRankSysmon)
834 lockInit(&sched.deferlock, lockRankDefer)
835 lockInit(&sched.sudoglock, lockRankSudog)
836 lockInit(&deadlock, lockRankDeadlock)
837 lockInit(&paniclk, lockRankPanic)
838 lockInit(&allglock, lockRankAllg)
839 lockInit(&allpLock, lockRankAllp)
840 lockInit(&reflectOffs.lock, lockRankReflectOffs)
841 lockInit(&finlock, lockRankFin)
842 lockInit(&cpuprof.lock, lockRankCpuprof)
843 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
844 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
845 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
846 traceLockInit()
847
848
849
850 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
851
852 lockVerifyMSize()
853
854 sched.midle.init(unsafe.Offsetof(m{}.idleNode))
855
856
857
858 gp := getg()
859 if raceenabled {
860 gp.racectx, raceprocctx0 = raceinit()
861 }
862
863 sched.maxmcount = 10000
864 crashFD.Store(^uintptr(0))
865
866
867 worldStopped()
868
869 godebug, parsedGodebug := getGodebugEarly()
870 if parsedGodebug {
871 parseRuntimeDebugVars(godebug)
872 }
873 ticks.init()
874 moduledataverify()
875 stackinit()
876 randinit()
877 mallocinit()
878 cpuinit(godebug)
879 alginit()
880 mcommoninit(gp.m, -1)
881 modulesinit()
882 typelinksinit()
883 itabsinit()
884 stkobjinit()
885
886 sigsave(&gp.m.sigmask)
887 initSigmask = gp.m.sigmask
888
889 goargs()
890 goenvs()
891 secure()
892 checkfds()
893 if !parsedGodebug {
894
895
896 parseRuntimeDebugVars(gogetenv("GODEBUG"))
897 }
898 finishDebugVarsSetup()
899 gcinit()
900
901
902
903 gcrash.stack = stackalloc(16384)
904 gcrash.stackguard0 = gcrash.stack.lo + 1000
905 gcrash.stackguard1 = gcrash.stack.lo + 1000
906
907
908
909
910
911 if disableMemoryProfiling {
912 MemProfileRate = 0
913 }
914
915
916 mProfStackInit(gp.m)
917 defaultGOMAXPROCSInit()
918
919 lock(&sched.lock)
920 sched.lastpoll.Store(nanotime())
921 var procs int32
922 if n, err := strconv.ParseInt(gogetenv("GOMAXPROCS"), 10, 32); err == nil && n > 0 {
923 procs = int32(n)
924 sched.customGOMAXPROCS = true
925 } else {
926
927
928
929
930
931
932
933
934 procs = defaultGOMAXPROCS(numCPUStartup)
935 }
936 if procresize(procs) != nil {
937 throw("unknown runnable goroutine during bootstrap")
938 }
939 unlock(&sched.lock)
940
941
942 worldStarted()
943
944 if buildVersion == "" {
945
946
947 buildVersion = "unknown"
948 }
949 if len(modinfo) == 1 {
950
951
952 modinfo = ""
953 }
954 }
955
956 func dumpgstatus(gp *g) {
957 thisg := getg()
958 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
959 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
960 }
961
// sched.lock must be held.
963 func checkmcount() {
964 assertLockHeld(&sched.lock)
965
966
967
968
969
970
971
972
973
974 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
975 if count > sched.maxmcount {
976 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
977 throw("thread exhaustion")
978 }
979 }
980
// mReserveID returns the next ID to use for a new m. This new m is immediately
// considered 'running' by checkdead.
//
// sched.lock must be held.
985 func mReserveID() int64 {
986 assertLockHeld(&sched.lock)
987
988 if sched.mnext+1 < sched.mnext {
989 throw("runtime: thread ID overflow")
990 }
991 id := sched.mnext
992 sched.mnext++
993 checkmcount()
994 return id
995 }
996
// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
998 func mcommoninit(mp *m, id int64) {
999 gp := getg()
1000
1001
1002 if gp != gp.m.g0 {
1003 callers(1, mp.createstack[:])
1004 }
1005
1006 lock(&sched.lock)
1007
1008 if id >= 0 {
1009 mp.id = id
1010 } else {
1011 mp.id = mReserveID()
1012 }
1013
1014 mp.self = newMWeakPointer(mp)
1015
1016 mrandinit(mp)
1017
1018 mpreinit(mp)
1019 if mp.gsignal != nil {
1020 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1021 }
1022
1023
1024
1025 mp.alllink = allm
1026
1027
1028
1029 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1030 unlock(&sched.lock)
1031
1032
1033 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1034 mp.cgoCallers = new(cgoCallers)
1035 }
1036 mProfStackInit(mp)
1037 }
1038
1039
1040
1041
1042
1043 func mProfStackInit(mp *m) {
1044 if debug.profstackdepth == 0 {
1045
1046
1047 return
1048 }
1049 mp.profStack = makeProfStackFP()
1050 mp.mLockProfile.stack = makeProfStackFP()
1051 }
1052
1053
1054
1055
1056 func makeProfStackFP() []uintptr {
1057
1058
1059
1060
1061
1062
1063 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1064 }
1065
1066
1067
1068 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1069
1070
1071 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1072
1073 func (mp *m) becomeSpinning() {
1074 mp.spinning = true
1075 sched.nmspinning.Add(1)
1076 sched.needspinning.Store(0)
1077 }
1078
1079
1080
1081
1082
1083
1084
1085
1086 func (mp *m) snapshotAllp() []*p {
1087 mp.allpSnapshot = allp
1088 return mp.allpSnapshot
1089 }
1090
1091
1092
1093
1094
1095
1096
1097 func (mp *m) clearAllpSnapshot() {
1098 mp.allpSnapshot = nil
1099 }
1100
1101 func (mp *m) hasCgoOnStack() bool {
1102 return mp.ncgo > 0 || mp.isextra
1103 }
1104
1105 const (
1106
1107
1108 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1109
1110
1111
1112 osHasLowResClockInt = goos.IsWindows
1113
1114
1115
1116 osHasLowResClock = osHasLowResClockInt > 0
1117 )
1118
// Mark gp ready to run.
1120 func ready(gp *g, traceskip int, next bool) {
1121 status := readgstatus(gp)
1122
1123
1124 mp := acquirem()
1125 if status&^_Gscan != _Gwaiting {
1126 dumpgstatus(gp)
1127 throw("bad g->status in ready")
1128 }
1129
1130
1131 trace := traceAcquire()
1132 casgstatus(gp, _Gwaiting, _Grunnable)
1133 if trace.ok() {
1134 trace.GoUnpark(gp, traceskip)
1135 traceRelease(trace)
1136 }
1137 runqput(mp.p.ptr(), gp, next)
1138 wakep()
1139 releasem(mp)
1140 }
1141
// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
1144 const freezeStopWait = 0x7fffffff
1145
1146
// freezing is set to non-zero if the runtime is trying to freeze the world.
1148 var freezing atomic.Bool
1149
// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
1153 func freezetheworld() {
1154 freezing.Store(true)
1155 if debug.dontfreezetheworld > 0 {
// With debug.dontfreezetheworld set, leave the Ps running so that the
// state of running goroutines is preserved (for example in a core dump).
// Sleep briefly before returning so goroutines have a moment to reach a
// stopping point before the crash proceeds.
1180 usleep(1000)
1181 return
1182 }
1183
1184
1185
1186
1187 for i := 0; i < 5; i++ {
1188
1189 sched.stopwait = freezeStopWait
1190 sched.gcwaiting.Store(true)
1191
1192 if !preemptall() {
1193 break
1194 }
1195 usleep(1000)
1196 }
1197
1198 usleep(1000)
1199 preemptall()
1200 usleep(1000)
1201 }
1202
1203
1204
1205
1206
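// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus and casfrom_Gscanstatus.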
1207 func readgstatus(gp *g) uint32 {
1208 return gp.atomicstatus.Load()
1209 }
1210
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
1215 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1216 success := false
1217
1218
1219 switch oldval {
1220 default:
1221 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1222 dumpgstatus(gp)
1223 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1224 case _Gscanrunnable,
1225 _Gscanwaiting,
1226 _Gscanrunning,
1227 _Gscansyscall,
1228 _Gscanleaked,
1229 _Gscanpreempted,
1230 _Gscandeadextra:
1231 if newval == oldval&^_Gscan {
1232 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1233 }
1234 }
1235 if !success {
1236 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1237 dumpgstatus(gp)
1238 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1239 }
1240 releaseLockRankAndM(lockRankGscan)
1241 }
1242
// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfrom_Gscanstatus acts like a lock release.
1245 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1246 switch oldval {
1247 case _Grunnable,
1248 _Grunning,
1249 _Gwaiting,
1250 _Gleaked,
1251 _Gsyscall,
1252 _Gdeadextra:
1253 if newval == oldval|_Gscan {
1254 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1255 if r {
1256 acquireLockRankAndM(lockRankGscan)
1257 }
1258 return r
1259
1260 }
1261 }
1262 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1263 throw("bad oldval passed to castogscanstatus")
1264 return false
1265 }
1266
// casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
// various latencies on every transition instead of sampling them.
1269 var casgstatusAlwaysTrack = false
1270
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the gp->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
1277 func casgstatus(gp *g, oldval, newval uint32) {
1278 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1279 systemstack(func() {
1280
1281
1282 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1283 throw("casgstatus: bad incoming values")
1284 })
1285 }
1286
1287 lockWithRankMayAcquire(nil, lockRankGscan)
1288
// See https://golang.org/cl/21503 for justification of the yield delay.
1290 const yieldDelay = 5 * 1000
1291 var nextYield int64
1292
1293
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
1295 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1296 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1297 systemstack(func() {
1298
1299
1300 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1301 })
1302 }
1303 if i == 0 {
1304 nextYield = nanotime() + yieldDelay
1305 }
1306 if nanotime() < nextYield {
1307 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1308 procyield(1)
1309 }
1310 } else {
1311 osyield()
1312 nextYield = nanotime() + yieldDelay/2
1313 }
1314 }
1315
1316 if gp.bubble != nil {
1317 systemstack(func() {
1318 gp.bubble.changegstatus(gp, oldval, newval)
1319 })
1320 }
1321
1322 if (oldval == _Grunning || oldval == _Gsyscall) && (newval != _Grunning && newval != _Gsyscall) {
1323
1324
1325 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1326 gp.tracking = true
1327 }
1328 gp.trackingSeq++
1329 }
1330 if !gp.tracking {
1331 return
1332 }
1333
1334
1335
1336
1337
1338
1339 switch oldval {
1340 case _Grunnable:
1341
1342
1343
1344 now := nanotime()
1345 gp.runnableTime += now - gp.trackingStamp
1346 gp.trackingStamp = 0
1347 case _Gwaiting:
1348 if !gp.waitreason.isMutexWait() {
1349
1350 break
1351 }
1352
1353
1354
1355
1356
1357 now := nanotime()
1358 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1359 gp.trackingStamp = 0
1360 }
1361 switch newval {
1362 case _Gwaiting:
1363 if !gp.waitreason.isMutexWait() {
1364
1365 break
1366 }
1367
1368 now := nanotime()
1369 gp.trackingStamp = now
1370 case _Grunnable:
1371
1372
1373 now := nanotime()
1374 gp.trackingStamp = now
1375 case _Grunning:
1376
1377
1378
1379 gp.tracking = false
1380 sched.timeToRun.record(gp.runnableTime)
1381 gp.runnableTime = 0
1382 }
1383 }
1384
1385
1386
1387
1388 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1389
1390 gp.waitreason = reason
1391 casgstatus(gp, old, _Gwaiting)
1392 }
1393
1394
1395
1396
1397
1398
1399
1400
1401 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1402 if !reason.isWaitingForSuspendG() {
1403 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1404 }
1405 casGToWaiting(gp, old, reason)
1406 }
1407
1408
1409
1410
1411
1412 func casGToPreemptScan(gp *g, old, new uint32) {
1413 if old != _Grunning || new != _Gscan|_Gpreempted {
1414 throw("bad g transition")
1415 }
1416 acquireLockRankAndM(lockRankGscan)
1417 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1418 }
1419
1420
1421
1422
1423
1424
1425 }
1426
1427
1428
1429
1430 func casGFromPreempted(gp *g, old, new uint32) bool {
1431 if old != _Gpreempted || new != _Gwaiting {
1432 throw("bad g transition")
1433 }
1434 gp.waitreason = waitReasonPreempted
1435 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1436 return false
1437 }
1438 if bubble := gp.bubble; bubble != nil {
1439 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1440 }
1441 return true
1442 }
1443
1444
1445 type stwReason uint8
1446
1447
1448
1449
1450 const (
1451 stwUnknown stwReason = iota
1452 stwGCMarkTerm
1453 stwGCSweepTerm
1454 stwWriteHeapDump
1455 stwGoroutineProfile
1456 stwGoroutineProfileCleanup
1457 stwAllGoroutinesStack
1458 stwReadMemStats
1459 stwAllThreadsSyscall
1460 stwGOMAXPROCS
1461 stwStartTrace
1462 stwStopTrace
1463 stwForTestCountPagesInUse
1464 stwForTestReadMetricsSlow
1465 stwForTestReadMemStatsSlow
1466 stwForTestPageCachePagesLeaked
1467 stwForTestResetDebugLog
1468 )
1469
1470 func (r stwReason) String() string {
1471 return stwReasonStrings[r]
1472 }
1473
1474 func (r stwReason) isGC() bool {
1475 return r == stwGCMarkTerm || r == stwGCSweepTerm
1476 }
1477
1478
1479
1480
1481 var stwReasonStrings = [...]string{
1482 stwUnknown: "unknown",
1483 stwGCMarkTerm: "GC mark termination",
1484 stwGCSweepTerm: "GC sweep termination",
1485 stwWriteHeapDump: "write heap dump",
1486 stwGoroutineProfile: "goroutine profile",
1487 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1488 stwAllGoroutinesStack: "all goroutines stack trace",
1489 stwReadMemStats: "read mem stats",
1490 stwAllThreadsSyscall: "AllThreadsSyscall",
1491 stwGOMAXPROCS: "GOMAXPROCS",
1492 stwStartTrace: "start trace",
1493 stwStopTrace: "stop trace",
1494 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1495 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1496 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1497 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1498 stwForTestResetDebugLog: "ResetDebugLog (test)",
1499 }
1500
1501
1502
1503 type worldStop struct {
1504 reason stwReason
1505 startedStopping int64
1506 finishedStopping int64
1507 stoppingCPUTime int64
1508 }
1509
1510
1511
1512
1513 var stopTheWorldContext worldStop
1514
// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason for
// the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume executing.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
1532 func stopTheWorld(reason stwReason) worldStop {
1533 semacquire(&worldsema)
1534 gp := getg()
1535 gp.m.preemptoff = reason.String()
1536 systemstack(func() {
1537 stopTheWorldContext = stopTheWorldWithSema(reason)
1538 })
1539 return stopTheWorldContext
1540 }
1541
1542
// startTheWorld undoes the effects of stopTheWorld.
1545 func startTheWorld(w worldStop) {
1546 systemstack(func() { startTheWorldWithSema(0, w) })
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 mp := acquirem()
1564 mp.preemptoff = ""
1565 semrelease1(&worldsema, true, 0)
1566 releasem(mp)
1567 }
1568
1569
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running; it also blocks a GC from starting
// until startTheWorldGC is called.
1572 func stopTheWorldGC(reason stwReason) worldStop {
1573 semacquire(&gcsema)
1574 return stopTheWorld(reason)
1575 }
1576
1577
1578
1579
1580 func startTheWorldGC(w worldStop) {
1581 startTheWorld(w)
1582 semrelease(&gcsema)
1583 }
1584
// Holding worldsema grants an M the right to try to stop the world.
1586 var worldsema uint32 = 1
1587
// Holding gcsema grants the M the right to block a GC, and blocks
// until the current GC is done. In particular, it prevents gomaxprocs
// from changing concurrently.
1594 var gcsema uint32 = 1
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628 func stopTheWorldWithSema(reason stwReason) worldStop {
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1642
1643 trace := traceAcquire()
1644 if trace.ok() {
1645 trace.STWStart(reason)
1646 traceRelease(trace)
1647 }
1648 gp := getg()
1649
1650
1651
1652 if gp.m.locks > 0 {
1653 throw("stopTheWorld: holding locks")
1654 }
1655
1656 lock(&sched.lock)
1657 start := nanotime()
1658 sched.stopwait = gomaxprocs
1659 sched.gcwaiting.Store(true)
1660 preemptall()
1661
1662
1663 gp.m.p.ptr().status = _Pgcstop
1664 gp.m.p.ptr().gcStopTime = start
1665 sched.stopwait--
1666
1667
1668 for _, pp := range allp {
1669 if thread, ok := setBlockOnExitSyscall(pp); ok {
1670 thread.gcstopP()
1671 thread.resume()
1672 }
1673 }
1674
1675
1676 now := nanotime()
1677 for {
1678 pp, _ := pidleget(now)
1679 if pp == nil {
1680 break
1681 }
1682 pp.status = _Pgcstop
1683 pp.gcStopTime = nanotime()
1684 sched.stopwait--
1685 }
1686 wait := sched.stopwait > 0
1687 unlock(&sched.lock)
1688
1689
1690 if wait {
1691 for {
1692
1693 if notetsleep(&sched.stopnote, 100*1000) {
1694 noteclear(&sched.stopnote)
1695 break
1696 }
1697 preemptall()
1698 }
1699 }
1700
1701 finish := nanotime()
1702 startTime := finish - start
1703 if reason.isGC() {
1704 sched.stwStoppingTimeGC.record(startTime)
1705 } else {
1706 sched.stwStoppingTimeOther.record(startTime)
1707 }
1708
1709
1710
1711
1712
1713 stoppingCPUTime := int64(0)
1714 bad := ""
1715 if sched.stopwait != 0 {
1716 bad = "stopTheWorld: not stopped (stopwait != 0)"
1717 } else {
1718 for _, pp := range allp {
1719 if pp.status != _Pgcstop {
1720 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1721 }
1722 if pp.gcStopTime == 0 && bad == "" {
1723 bad = "stopTheWorld: broken CPU time accounting"
1724 }
1725 stoppingCPUTime += finish - pp.gcStopTime
1726 pp.gcStopTime = 0
1727 }
1728 }
1729 if freezing.Load() {
1730
1731
1732
1733
1734 lock(&deadlock)
1735 lock(&deadlock)
1736 }
1737 if bad != "" {
1738 throw(bad)
1739 }
1740
1741 worldStopped()
1742
1743
1744 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1745
1746 return worldStop{
1747 reason: reason,
1748 startedStopping: start,
1749 finishedStopping: finish,
1750 stoppingCPUTime: stoppingCPUTime,
1751 }
1752 }
1753
1754
1755
1756
1757
1758
1759
1760 func startTheWorldWithSema(now int64, w worldStop) int64 {
1761 assertWorldStopped()
1762
1763 mp := acquirem()
1764 if netpollinited() {
1765 list, delta := netpoll(0)
1766 injectglist(&list)
1767 netpollAdjustWaiters(delta)
1768 }
1769 lock(&sched.lock)
1770
1771 procs := gomaxprocs
1772 if newprocs != 0 {
1773 procs = newprocs
1774 newprocs = 0
1775 }
1776 p1 := procresize(procs)
1777 sched.gcwaiting.Store(false)
1778 if sched.sysmonwait.Load() {
1779 sched.sysmonwait.Store(false)
1780 notewakeup(&sched.sysmonnote)
1781 }
1782 unlock(&sched.lock)
1783
1784 worldStarted()
1785
1786 for p1 != nil {
1787 p := p1
1788 p1 = p1.link.ptr()
1789 if p.m != 0 {
1790 mp := p.m.ptr()
1791 p.m = 0
1792 if mp.nextp != 0 {
1793 throw("startTheWorld: inconsistent mp->nextp")
1794 }
1795 mp.nextp.set(p)
1796 notewakeup(&mp.park)
1797 } else {
1798
1799 newm(nil, p, -1)
1800 }
1801 }
1802
1803
1804 if now == 0 {
1805 now = nanotime()
1806 }
1807 totalTime := now - w.startedStopping
1808 if w.reason.isGC() {
1809 sched.stwTotalTimeGC.record(totalTime)
1810 } else {
1811 sched.stwTotalTimeOther.record(totalTime)
1812 }
1813 trace := traceAcquire()
1814 if trace.ok() {
1815 trace.STWDone()
1816 traceRelease(trace)
1817 }
1818
1819
1820
1821
1822 wakep()
1823
1824 releasem(mp)
1825
1826 return now
1827 }
1828
1829
1830
1831 func usesLibcall() bool {
1832 switch GOOS {
1833 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1834 return true
1835 }
1836 return false
1837 }
1838
1839
1840
1841 func mStackIsSystemAllocated() bool {
1842 switch GOOS {
1843 case "aix", "darwin", "plan9", "illumos", "ios", "openbsd", "solaris", "windows":
1844 return true
1845 }
1846 return false
1847 }
1848
// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
1851 func mstart()
1852
// mstart0 is the Go entry-point for new Ms.
// It must not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
1862 func mstart0() {
1863 gp := getg()
1864
1865 osStack := gp.stack.lo == 0
1866 if osStack {
1867
1868
1869
1870
1871
1872
1873
1874
1875 size := gp.stack.hi
1876 if size == 0 {
1877 size = 16384 * sys.StackGuardMultiplier
1878 }
1879 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1880 gp.stack.lo = gp.stack.hi - size + 1024
1881 }
1882
1883
1884 gp.stackguard0 = gp.stack.lo + stackGuard
1885
1886
1887 gp.stackguard1 = gp.stackguard0
1888 mstart1()
1889
1890
1891 if mStackIsSystemAllocated() {
1892
1893
1894
1895 osStack = true
1896 }
1897 mexit(osStack)
1898 }
1899
1900
1901
1902
1903
1904 func mstart1() {
1905 gp := getg()
1906
1907 if gp != gp.m.g0 {
1908 throw("bad runtime·mstart")
1909 }
1910
1911
1912
1913
1914
1915
1916
1917 gp.sched.g = guintptr(unsafe.Pointer(gp))
1918 gp.sched.pc = sys.GetCallerPC()
1919 gp.sched.sp = sys.GetCallerSP()
1920
1921 asminit()
1922 minit()
1923
1924
1925
1926 if gp.m == &m0 {
1927 mstartm0()
1928 }
1929
1930 if debug.dataindependenttiming == 1 {
1931 sys.EnableDIT()
1932 }
1933
1934 if fn := gp.m.mstartfn; fn != nil {
1935 fn()
1936 }
1937
1938 if gp.m != &m0 {
1939 acquirep(gp.m.nextp.ptr())
1940 gp.m.nextp = 0
1941 }
1942 schedule()
1943 }
1944
1945
1946
1947
1948
1949
1950
1951 func mstartm0() {
1952
1953
1954
1955 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1956 cgoHasExtraM = true
1957 newextram()
1958 }
1959 initsig(false)
1960 }
1961
1962
1963
1964
1965 func mPark() {
1966 gp := getg()
1967 notesleep(&gp.m.park)
1968 noteclear(&gp.m.park)
1969 }
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981 func mexit(osStack bool) {
1982 mp := getg().m
1983
1984 if mp == &m0 {
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996 handoffp(releasep())
1997 lock(&sched.lock)
1998 sched.nmfreed++
1999 checkdead()
2000 unlock(&sched.lock)
2001 mPark()
2002 throw("locked m0 woke up")
2003 }
2004
2005 sigblock(true)
2006 unminit()
2007
2008
2009 if mp.gsignal != nil {
2010 stackfree(mp.gsignal.stack)
2011 if valgrindenabled {
2012 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2013 mp.gsignal.valgrindStackID = 0
2014 }
2015
2016
2017
2018
2019 mp.gsignal = nil
2020 }
2021
2022
2023 vgetrandomDestroy(mp)
2024
2025
2026
2027 mp.self.clear()
2028
2029
2030 lock(&sched.lock)
2031 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2032 if *pprev == mp {
2033 *pprev = mp.alllink
2034 goto found
2035 }
2036 }
2037 throw("m not found in allm")
2038 found:
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053 mp.freeWait.Store(freeMWait)
2054 mp.freelink = sched.freem
2055 sched.freem = mp
2056 unlock(&sched.lock)
2057
2058 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2059 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2060
2061
2062 handoffp(releasep())
2063
2064
2065
2066
2067
2068 lock(&sched.lock)
2069 sched.nmfreed++
2070 checkdead()
2071 unlock(&sched.lock)
2072
2073 if GOOS == "darwin" || GOOS == "ios" {
2074
2075
2076 if mp.signalPending.Load() != 0 {
2077 pendingPreemptSignals.Add(-1)
2078 }
2079 }
2080
2081
2082
2083 mdestroy(mp)
2084
2085 if osStack {
2086
2087 mp.freeWait.Store(freeMRef)
2088
2089
2090
2091 return
2092 }
2093
2094
2095
2096
2097
2098 exitThread(&mp.freeWait)
2099 }
2100
2101
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema. fn must not refer to any
// parts of the current goroutine's stack, since the GC may move it.
2111 func forEachP(reason waitReason, fn func(*p)) {
2112 systemstack(func() {
2113 gp := getg().m.curg
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125 casGToWaitingForSuspendG(gp, _Grunning, reason)
2126 forEachPInternal(fn)
2127 casgstatus(gp, _Gwaiting, _Grunning)
2128 })
2129 }
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140 func forEachPInternal(fn func(*p)) {
2141 mp := acquirem()
2142 pp := getg().m.p.ptr()
2143
2144 lock(&sched.lock)
2145 if sched.safePointWait != 0 {
2146 throw("forEachP: sched.safePointWait != 0")
2147 }
2148 sched.safePointWait = gomaxprocs - 1
2149 sched.safePointFn = fn
2150
2151
2152 for _, p2 := range allp {
2153 if p2 != pp {
2154 atomic.Store(&p2.runSafePointFn, 1)
2155 }
2156 }
2157 preemptall()
2158
2159
2160
2161
2162
2163
2164
2165 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2166 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2167 fn(p)
2168 sched.safePointWait--
2169 }
2170 }
2171
2172 wait := sched.safePointWait > 0
2173 unlock(&sched.lock)
2174
2175
2176 fn(pp)
2177
2178
2179
2180 for _, p2 := range allp {
2181 if atomic.Load(&p2.runSafePointFn) != 1 {
2182
2183 continue
2184 }
2185 if thread, ok := setBlockOnExitSyscall(p2); ok {
2186 thread.takeP()
2187 thread.resume()
2188 handoffp(p2)
2189 }
2190 }
2191
2192
2193 if wait {
2194 for {
2195
2196
2197
2198
2199 if notetsleep(&sched.safePointNote, 100*1000) {
2200 noteclear(&sched.safePointNote)
2201 break
2202 }
2203 preemptall()
2204 }
2205 }
2206 if sched.safePointWait != 0 {
2207 throw("forEachP: not done")
2208 }
2209 for _, p2 := range allp {
2210 if p2.runSafePointFn != 0 {
2211 throw("forEachP: P did not run fn")
2212 }
2213 }
2214
2215 lock(&sched.lock)
2216 sched.safePointFn = nil
2217 unlock(&sched.lock)
2218 releasem(mp)
2219 }
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232 func runSafePointFn() {
2233 p := getg().m.p.ptr()
2234
2235
2236
2237 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2238 return
2239 }
2240 sched.safePointFn(p)
2241 lock(&sched.lock)
2242 sched.safePointWait--
2243 if sched.safePointWait == 0 {
2244 notewakeup(&sched.safePointNote)
2245 }
2246 unlock(&sched.lock)
2247 }
2248
2249
2250
2251
2252 var cgoThreadStart unsafe.Pointer
2253
2254 type cgothreadstart struct {
2255 g guintptr
2256 tls *uint64
2257 fn unsafe.Pointer
2258 }
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269 func allocm(pp *p, fn func(), id int64) *m {
2270 allocmLock.rlock()
2271
2272
2273
2274
2275 acquirem()
2276
2277 gp := getg()
2278 if gp.m.p == 0 {
2279 acquirep(pp)
2280 }
2281
2282
2283
2284 if sched.freem != nil {
2285 lock(&sched.lock)
2286 var newList *m
2287 for freem := sched.freem; freem != nil; {
2288
2289 wait := freem.freeWait.Load()
2290 if wait == freeMWait {
2291 next := freem.freelink
2292 freem.freelink = newList
2293 newList = freem
2294 freem = next
2295 continue
2296 }
2297
2298
2299
2300 if traceEnabled() || traceShuttingDown() {
2301 traceThreadDestroy(freem)
2302 }
2303
2304
2305
2306 if wait == freeMStack {
2307
2308
2309
2310 systemstack(func() {
2311 stackfree(freem.g0.stack)
2312 if valgrindenabled {
2313 valgrindDeregisterStack(freem.g0.valgrindStackID)
2314 freem.g0.valgrindStackID = 0
2315 }
2316 })
2317 }
2318 freem = freem.freelink
2319 }
2320 sched.freem = newList
2321 unlock(&sched.lock)
2322 }
2323
2324 mp := &new(mPadded).m
2325 mp.mstartfn = fn
2326 mcommoninit(mp, id)
2327
2328
2329
2330 if iscgo || mStackIsSystemAllocated() {
2331 mp.g0 = malg(-1)
2332 } else {
2333 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2334 }
2335 mp.g0.m = mp
2336
2337 if pp == gp.m.p.ptr() {
2338 releasep()
2339 }
2340
2341 releasem(gp.m)
2342 allocmLock.runlock()
2343 return mp
2344 }
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
2385 func needm(signal bool) {
2386 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2387
2388
2389
2390
2391
2392
2393 writeErrStr("fatal error: cgo callback before cgo call\n")
2394 exit(1)
2395 }
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405 var sigmask sigset
2406 sigsave(&sigmask)
2407 sigblock(false)
2408
2409
2410
2411
2412 mp, last := getExtraM()
2413
2414
2415
2416
2417
2418
2419
2420
2421 mp.needextram = last
2422
2423
2424 mp.sigmask = sigmask
2425
2426
2427
2428 osSetupTLS(mp)
2429
2430
2431
2432 setg(mp.g0)
2433 sp := sys.GetCallerSP()
2434 callbackUpdateSystemStack(mp, sp, signal)
2435
2436
2437
2438
2439 mp.isExtraInC = false
2440
2441
2442 asminit()
2443 minit()
2444
2445
2446
2447
2448
2449
2450 var trace traceLocker
2451 if !signal {
2452 trace = traceAcquire()
2453 }
2454
2455
2456 casgstatus(mp.curg, _Gdeadextra, _Gsyscall)
2457 sched.ngsys.Add(-1)
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467 addGSyscallNoP(mp)
2468
2469 if !signal {
2470 if trace.ok() {
2471 trace.GoCreateSyscall(mp.curg)
2472 traceRelease(trace)
2473 }
2474 }
2475 mp.isExtraInSig = signal
2476 }
2477
2478
2479
2480
2481 func needAndBindM() {
2482 needm(false)
2483
2484 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2485 cgoBindM()
2486 }
2487 }
2488
2489
2490
2491
2492 func newextram() {
2493 c := extraMWaiters.Swap(0)
2494 if c > 0 {
2495 for i := uint32(0); i < c; i++ {
2496 oneNewExtraM()
2497 }
2498 } else if extraMLength.Load() == 0 {
2499
2500 oneNewExtraM()
2501 }
2502 }
2503
2504
2505 func oneNewExtraM() {
2506
2507
2508
2509
2510
2511 mp := allocm(nil, nil, -1)
2512 gp := malg(4096)
2513 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2514 gp.sched.sp = gp.stack.hi
2515 gp.sched.sp -= 4 * goarch.PtrSize
2516 gp.sched.lr = 0
2517 gp.sched.g = guintptr(unsafe.Pointer(gp))
2518 gp.syscallpc = gp.sched.pc
2519 gp.syscallsp = gp.sched.sp
2520 gp.stktopsp = gp.sched.sp
2521
2522
2523
2524 casgstatus(gp, _Gidle, _Gdeadextra)
2525 gp.m = mp
2526 mp.curg = gp
2527 mp.isextra = true
2528
2529 mp.isExtraInC = true
2530 mp.lockedInt++
2531 mp.lockedg.set(gp)
2532 gp.lockedm.set(mp)
2533 gp.goid = sched.goidgen.Add(1)
2534 if raceenabled {
2535 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2536 }
2537
2538 allgadd(gp)
2539
2540
2541
2542
2543
2544 sched.ngsys.Add(1)
2545
2546
2547 addExtraM(mp)
2548 }
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
2583 func dropm() {
2584
2585
2586
2587 mp := getg().m
2588
2589
2590
2591
2592
2593 var trace traceLocker
2594 if !mp.isExtraInSig {
2595 trace = traceAcquire()
2596 }
2597
2598
2599 casgstatus(mp.curg, _Gsyscall, _Gdeadextra)
2600 mp.curg.preemptStop = false
2601 sched.ngsys.Add(1)
2602 decGSyscallNoP(mp)
2603
2604 if !mp.isExtraInSig {
2605 if trace.ok() {
2606 trace.GoDestroySyscall()
2607 traceRelease(trace)
2608 }
2609 }
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624 mp.syscalltick--
2625
2626
2627
2628 mp.curg.trace.reset()
2629
2630
2631
2632
2633 if traceEnabled() || traceShuttingDown() {
2634
2635
2636
2637
2638
2639
2640
2641 lock(&sched.lock)
2642 traceThreadDestroy(mp)
2643 unlock(&sched.lock)
2644 }
2645 mp.isExtraInSig = false
2646
2647
2648
2649
2650
2651 sigmask := mp.sigmask
2652 sigblock(false)
2653 unminit()
2654
2655 setg(nil)
2656
2657
2658
2659 g0 := mp.g0
2660 g0.stack.hi = 0
2661 g0.stack.lo = 0
2662 g0.stackguard0 = 0
2663 g0.stackguard1 = 0
2664 mp.g0StackAccurate = false
2665
2666 putExtraM(mp)
2667
2668 msigrestore(sigmask)
2669 }
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691 func cgoBindM() {
2692 if GOOS == "windows" || GOOS == "plan9" {
2693 fatal("bindm in unexpected GOOS")
2694 }
2695 g := getg()
2696 if g.m.g0 != g {
2697 fatal("the current g is not g0")
2698 }
2699 if _cgo_bindm != nil {
2700 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2701 }
2702 }
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715 func getm() uintptr {
2716 return uintptr(unsafe.Pointer(getg().m))
2717 }
2718
2719 var (
2720
2721
2722
2723
2724
2725
2726 extraM atomic.Uintptr
2727
2728 extraMLength atomic.Uint32
2729
2730 extraMWaiters atomic.Uint32
2731
2732
2733 extraMInUse atomic.Uint32
2734 )
2735
2736
2737
2738
2739
2740
2741
2742
2743 func lockextra(nilokay bool) *m {
2744 const locked = 1
2745
2746 incr := false
2747 for {
2748 old := extraM.Load()
2749 if old == locked {
2750 osyield_no_g()
2751 continue
2752 }
2753 if old == 0 && !nilokay {
2754 if !incr {
2755
2756
2757
2758 extraMWaiters.Add(1)
2759 incr = true
2760 }
2761 usleep_no_g(1)
2762 continue
2763 }
2764 if extraM.CompareAndSwap(old, locked) {
2765 return (*m)(unsafe.Pointer(old))
2766 }
2767 osyield_no_g()
2768 continue
2769 }
2770 }
2771
2772
2773 func unlockextra(mp *m, delta int32) {
2774 extraMLength.Add(delta)
2775 extraM.Store(uintptr(unsafe.Pointer(mp)))
2776 }
2777
2778
2779
2780
2781
2782
2783
2784
2785 func getExtraM() (mp *m, last bool) {
2786 mp = lockextra(false)
2787 extraMInUse.Add(1)
2788 unlockextra(mp.schedlink.ptr(), -1)
2789 return mp, mp.schedlink.ptr() == nil
2790 }
2791
2792
2793
2794
2795
2796 func putExtraM(mp *m) {
2797 extraMInUse.Add(-1)
2798 addExtraM(mp)
2799 }
2800
2801
2802
2803
2804 func addExtraM(mp *m) {
2805 mnext := lockextra(true)
2806 mp.schedlink.set(mnext)
2807 unlockextra(mp, 1)
2808 }
2809
2810 var (
2811
2812
2813
2814 allocmLock rwmutex
2815
2816
2817
2818
2819 execLock rwmutex
2820 )
2821
2822
2823
2824 const (
2825 failthreadcreate = "runtime: failed to create new OS thread\n"
2826 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2827 )
2828
2829
2830
2831
2832 var newmHandoff struct {
2833 lock mutex
2834
2835
2836
2837 newm muintptr
2838
2839
2840
2841 waiting bool
2842 wake note
2843
2844
2845
2846
2847 haveTemplateThread uint32
2848 }
2849
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
2857 func newm(fn func(), pp *p, id int64) {
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868 acquirem()
2869
2870 mp := allocm(pp, fn, id)
2871 mp.nextp.set(pp)
2872 mp.sigmask = initSigmask
2873 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885 lock(&newmHandoff.lock)
2886 if newmHandoff.haveTemplateThread == 0 {
2887 throw("on a locked thread with no template thread")
2888 }
2889 mp.schedlink = newmHandoff.newm
2890 newmHandoff.newm.set(mp)
2891 if newmHandoff.waiting {
2892 newmHandoff.waiting = false
2893 notewakeup(&newmHandoff.wake)
2894 }
2895 unlock(&newmHandoff.lock)
2896
2897
2898
2899 releasem(getg().m)
2900 return
2901 }
2902 newm1(mp)
2903 releasem(getg().m)
2904 }
2905
2906 func newm1(mp *m) {
2907 if iscgo {
2908 var ts cgothreadstart
2909 if _cgo_thread_start == nil {
2910 throw("_cgo_thread_start missing")
2911 }
2912 ts.g.set(mp.g0)
2913 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2914 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2915 if msanenabled {
2916 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2917 }
2918 if asanenabled {
2919 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2920 }
2921 execLock.rlock()
2922 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2923 execLock.runlock()
2924 return
2925 }
2926 execLock.rlock()
2927 newosproc(mp)
2928 execLock.runlock()
2929 }
2930
2931
2932
2933
2934
2935 func startTemplateThread() {
2936 if GOARCH == "wasm" {
2937 return
2938 }
2939
2940
2941
2942 mp := acquirem()
2943 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2944 releasem(mp)
2945 return
2946 }
2947 newm(templateThread, nil, -1)
2948 releasem(mp)
2949 }
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963 func templateThread() {
2964 lock(&sched.lock)
2965 sched.nmsys++
2966 checkdead()
2967 unlock(&sched.lock)
2968
2969 for {
2970 lock(&newmHandoff.lock)
2971 for newmHandoff.newm != 0 {
2972 newm := newmHandoff.newm.ptr()
2973 newmHandoff.newm = 0
2974 unlock(&newmHandoff.lock)
2975 for newm != nil {
2976 next := newm.schedlink.ptr()
2977 newm.schedlink = 0
2978 newm1(newm)
2979 newm = next
2980 }
2981 lock(&newmHandoff.lock)
2982 }
2983 newmHandoff.waiting = true
2984 noteclear(&newmHandoff.wake)
2985 unlock(&newmHandoff.lock)
2986 notesleep(&newmHandoff.wake)
2987 }
2988 }
2989
2990
// Stops execution of the current m until new work is available.
// Returns with acquired P.
2992 func stopm() {
2993 gp := getg()
2994
2995 if gp.m.locks != 0 {
2996 throw("stopm holding locks")
2997 }
2998 if gp.m.p != 0 {
2999 throw("stopm holding p")
3000 }
3001 if gp.m.spinning {
3002 throw("stopm spinning")
3003 }
3004
3005 lock(&sched.lock)
3006 mput(gp.m)
3007 unlock(&sched.lock)
3008 mPark()
3009 acquirep(gp.m.nextp.ptr())
3010 gp.m.nextp = 0
3011 }
3012
3013 func mspinning() {
3014
3015 getg().m.spinning = true
3016 }
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
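// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and must provide a P;
// startm will set m.spinning in the newly started M.
// lockheld indicates whether the caller already holds sched.lock.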
3035 func startm(pp *p, spinning, lockheld bool) {
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052 mp := acquirem()
3053 if !lockheld {
3054 lock(&sched.lock)
3055 }
3056 if pp == nil {
3057 if spinning {
3058
3059
3060
3061 throw("startm: P required for spinning=true")
3062 }
3063 pp, _ = pidleget(0)
3064 if pp == nil {
3065 if !lockheld {
3066 unlock(&sched.lock)
3067 }
3068 releasem(mp)
3069 return
3070 }
3071 }
3072 nmp := mget()
3073 if nmp == nil {
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088 id := mReserveID()
3089 unlock(&sched.lock)
3090
3091 var fn func()
3092 if spinning {
3093
3094 fn = mspinning
3095 }
3096 newm(fn, pp, id)
3097
3098 if lockheld {
3099 lock(&sched.lock)
3100 }
3101
3102
3103 releasem(mp)
3104 return
3105 }
3106 if !lockheld {
3107 unlock(&sched.lock)
3108 }
3109 if nmp.spinning {
3110 throw("startm: m is spinning")
3111 }
3112 if nmp.nextp != 0 {
3113 throw("startm: m has p")
3114 }
3115 if spinning && !runqempty(pp) {
3116 throw("startm: p has runnable gs")
3117 }
3118
3119 nmp.spinning = spinning
3120 nmp.nextp.set(pp)
3121 notewakeup(&nmp.park)
3122
3123
3124 releasem(mp)
3125 }
3126
3127
3128
3129
3130
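// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.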
3131 func handoffp(pp *p) {
3132
3133
3134
3135
3136 if !runqempty(pp) || !sched.runq.empty() {
3137 startm(pp, false, false)
3138 return
3139 }
3140
3141 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3142 startm(pp, false, false)
3143 return
3144 }
3145
3146 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) {
3147 startm(pp, false, false)
3148 return
3149 }
3150
3151
3152 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3153 sched.needspinning.Store(0)
3154 startm(pp, true, false)
3155 return
3156 }
3157 lock(&sched.lock)
3158 if sched.gcwaiting.Load() {
3159 pp.status = _Pgcstop
3160 pp.gcStopTime = nanotime()
3161 sched.stopwait--
3162 if sched.stopwait == 0 {
3163 notewakeup(&sched.stopnote)
3164 }
3165 unlock(&sched.lock)
3166 return
3167 }
3168 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3169 sched.safePointFn(pp)
3170 sched.safePointWait--
3171 if sched.safePointWait == 0 {
3172 notewakeup(&sched.safePointNote)
3173 }
3174 }
3175 if !sched.runq.empty() {
3176 unlock(&sched.lock)
3177 startm(pp, false, false)
3178 return
3179 }
3180
3181
3182 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3183 unlock(&sched.lock)
3184 startm(pp, false, false)
3185 return
3186 }
3187
3188
3189
3190 when := pp.timers.wakeTime()
3191 pidleput(pp, 0)
3192 unlock(&sched.lock)
3193
3194 if when != 0 {
3195 wakeNetPoller(when)
3196 }
3197 }
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
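// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
// Must be called with a P.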
3212 func wakep() {
3213
3214
3215 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3216 return
3217 }
3218
3219
3220
3221
3222
3223
3224 mp := acquirem()
3225
3226 var pp *p
3227 lock(&sched.lock)
3228 pp, _ = pidlegetSpinning(0)
3229 if pp == nil {
3230 if sched.nmspinning.Add(-1) < 0 {
3231 throw("wakep: negative nmspinning")
3232 }
3233 unlock(&sched.lock)
3234 releasem(mp)
3235 return
3236 }
3237
3238
3239
3240
3241 unlock(&sched.lock)
3242
3243 startm(pp, true, false)
3244
3245 releasem(mp)
3246 }
3247
3248
3249
3250 func stoplockedm() {
3251 gp := getg()
3252
3253 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3254 throw("stoplockedm: inconsistent locking")
3255 }
3256 if gp.m.p != 0 {
3257
3258 pp := releasep()
3259 handoffp(pp)
3260 }
3261 incidlelocked(1)
3262
3263 mPark()
3264 status := readgstatus(gp.m.lockedg.ptr())
3265 if status&^_Gscan != _Grunnable {
3266 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3267 dumpgstatus(gp.m.lockedg.ptr())
3268 throw("stoplockedm: not runnable")
3269 }
3270 acquirep(gp.m.nextp.ptr())
3271 gp.m.nextp = 0
3272 }
3273
3274
3275
3276
3277
3278 func startlockedm(gp *g) {
3279 mp := gp.lockedm.ptr()
3280 if mp == getg().m {
3281 throw("startlockedm: locked to me")
3282 }
3283 if mp.nextp != 0 {
3284 throw("startlockedm: m has p")
3285 }
3286
3287 incidlelocked(-1)
3288 pp := releasep()
3289 mp.nextp.set(pp)
3290 notewakeup(&mp.park)
3291 stopm()
3292 }
3293
3294
3295
3296 func gcstopm() {
3297 gp := getg()
3298
3299 if !sched.gcwaiting.Load() {
3300 throw("gcstopm: not waiting for gc")
3301 }
3302 if gp.m.spinning {
3303 gp.m.spinning = false
3304
3305
3306 if sched.nmspinning.Add(-1) < 0 {
3307 throw("gcstopm: negative nmspinning")
3308 }
3309 }
3310 pp := releasep()
3311 lock(&sched.lock)
3312 pp.status = _Pgcstop
3313 pp.gcStopTime = nanotime()
3314 sched.stopwait--
3315 if sched.stopwait == 0 {
3316 notewakeup(&sched.stopnote)
3317 }
3318 unlock(&sched.lock)
3319 stopm()
3320 }
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
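// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.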
3331 func execute(gp *g, inheritTime bool) {
3332 mp := getg().m
3333
3334 if goroutineProfile.active {
3335
3336
3337
3338 tryRecordGoroutineProfile(gp, nil, osyield)
3339 }
3340
3341
3342 mp.curg = gp
3343 gp.m = mp
3344 gp.syncSafePoint = false
3345 casgstatus(gp, _Grunnable, _Grunning)
3346 gp.waitsince = 0
3347 gp.preempt = false
3348 gp.stackguard0 = gp.stack.lo + stackGuard
3349 if !inheritTime {
3350 mp.p.ptr().schedtick++
3351 }
3352
3353 if sys.DITSupported && debug.dataindependenttiming != 1 {
3354 if gp.ditWanted && !mp.ditEnabled {
3355
3356
3357 sys.EnableDIT()
3358 mp.ditEnabled = true
3359 } else if !gp.ditWanted && mp.ditEnabled {
3360
3361
3362
3363
3364
3365 sys.DisableDIT()
3366 mp.ditEnabled = false
3367 }
3368 }
3369
3370
3371 hz := sched.profilehz
3372 if mp.profilehz != hz {
3373 setThreadCPUProfiler(hz)
3374 }
3375
3376 trace := traceAcquire()
3377 if trace.ok() {
3378 trace.GoStart()
3379 traceRelease(trace)
3380 }
3381
3382 gogo(&gp.sched)
3383 }
3384
3385
3386
3387
3388
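// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue, poll network.
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.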
3389 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3390 mp := getg().m
3391
3392
3393
3394
3395
3396 top:
3397
3398
3399
3400 mp.clearAllpSnapshot()
3401
3402 pp := mp.p.ptr()
3403 if sched.gcwaiting.Load() {
3404 gcstopm()
3405 goto top
3406 }
3407 if pp.runSafePointFn != 0 {
3408 runSafePointFn()
3409 }
3410
3411
3412
3413
3414
3415 now, pollUntil, _ := pp.timers.check(0, nil)
3416
3417
3418 if traceEnabled() || traceShuttingDown() {
3419 gp := traceReader()
3420 if gp != nil {
3421 trace := traceAcquire()
3422 casgstatus(gp, _Gwaiting, _Grunnable)
3423 if trace.ok() {
3424 trace.GoUnpark(gp, 0)
3425 traceRelease(trace)
3426 }
3427 return gp, false, true
3428 }
3429 }
3430
3431
3432 if gcBlackenEnabled != 0 {
3433 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3434 if gp != nil {
3435 return gp, false, true
3436 }
3437 now = tnow
3438 }
3439
3440
3441
3442
3443 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3444 lock(&sched.lock)
3445 gp := globrunqget()
3446 unlock(&sched.lock)
3447 if gp != nil {
3448 return gp, false, false
3449 }
3450 }
3451
3452
3453 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3454 if gp := wakefing(); gp != nil {
3455 ready(gp, 0, true)
3456 }
3457 }
3458
3459
3460 if gcCleanups.needsWake() {
3461 gcCleanups.wake()
3462 }
3463
3464 if *cgo_yield != nil {
3465 asmcgocall(*cgo_yield, nil)
3466 }
3467
3468
3469 if gp, inheritTime := runqget(pp); gp != nil {
3470 return gp, inheritTime, false
3471 }
3472
3473
3474 if !sched.runq.empty() {
3475 lock(&sched.lock)
3476 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3477 unlock(&sched.lock)
3478 if gp != nil {
3479 if runqputbatch(pp, &q); !q.empty() {
3480 throw("Couldn't put Gs into empty local runq")
3481 }
3482 return gp, false, false
3483 }
3484 }
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3496 list, delta := netpoll(0)
3497 sched.pollingNet.Store(0)
3498 if !list.empty() {
3499 gp := list.pop()
3500 injectglist(&list)
3501 netpollAdjustWaiters(delta)
3502 trace := traceAcquire()
3503 casgstatus(gp, _Gwaiting, _Grunnable)
3504 if trace.ok() {
3505 trace.GoUnpark(gp, 0)
3506 traceRelease(trace)
3507 }
3508 return gp, false, false
3509 }
3510 }
3511
3512
3513
3514
3515
3516
3517 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3518 if !mp.spinning {
3519 mp.becomeSpinning()
3520 }
3521
3522 gp, inheritTime, tnow, w, newWork := stealWork(now)
3523 if gp != nil {
3524
3525 return gp, inheritTime, false
3526 }
3527 if newWork {
3528
3529
3530 goto top
3531 }
3532
3533 now = tnow
3534 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3535
3536 pollUntil = w
3537 }
3538 }
3539
3540
3541
3542
3543
3544 if gcBlackenEnabled != 0 && gcShouldScheduleWorker(pp) && gcController.addIdleMarkWorker() {
3545 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3546 if node != nil {
3547 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3548 gp := node.gp.ptr()
3549
3550 trace := traceAcquire()
3551 casgstatus(gp, _Gwaiting, _Grunnable)
3552 if trace.ok() {
3553 trace.GoUnpark(gp, 0)
3554 traceRelease(trace)
3555 }
3556 return gp, false, false
3557 }
3558 gcController.removeIdleMarkWorker()
3559 }
3560
3561
3562
3563
3564
3565 gp, otherReady := beforeIdle(now, pollUntil)
3566 if gp != nil {
3567 trace := traceAcquire()
3568 casgstatus(gp, _Gwaiting, _Grunnable)
3569 if trace.ok() {
3570 trace.GoUnpark(gp, 0)
3571 traceRelease(trace)
3572 }
3573 return gp, false, false
3574 }
3575 if otherReady {
3576 goto top
3577 }
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587 allpSnapshot := mp.snapshotAllp()
3588
3589
3590 idlepMaskSnapshot := idlepMask
3591 timerpMaskSnapshot := timerpMask
3592
3593
3594 lock(&sched.lock)
3595 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3596 unlock(&sched.lock)
3597 goto top
3598 }
3599 if !sched.runq.empty() {
3600 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3601 unlock(&sched.lock)
3602 if gp == nil {
3603 throw("global runq empty with non-zero runqsize")
3604 }
3605 if runqputbatch(pp, &q); !q.empty() {
3606 throw("Couldn't put Gs into empty local runq")
3607 }
3608 return gp, false, false
3609 }
3610 if !mp.spinning && sched.needspinning.Load() == 1 {
3611
3612 mp.becomeSpinning()
3613 unlock(&sched.lock)
3614 goto top
3615 }
3616 if releasep() != pp {
3617 throw("findRunnable: wrong p")
3618 }
3619 now = pidleput(pp, now)
3620 unlock(&sched.lock)
3621
3622
// Delicate dance: the thread transitions from spinning to non-spinning
// state, potentially concurrently with submission of new work. We must
// drop nmspinning first and then check all sources of work again (with
// a store/load barrier in between). If we did it the other way around,
// another thread could submit work after we checked everything but
// before we dropped nmspinning, and nobody would unpark a thread to
// run it. That recheck covers the global and per-P run queues, timers,
// and idle-priority GC work. If new work is discovered below, the
// thread re-enters spinning state before acting on it so that
// resetspinning can unpark yet another worker if needed.
3657
3658 wasSpinning := mp.spinning
3659 if mp.spinning {
3660 mp.spinning = false
3661 if sched.nmspinning.Add(-1) < 0 {
3662 throw("findRunnable: negative nmspinning")
3663 }
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676 lock(&sched.lock)
3677 if !sched.runq.empty() {
3678 pp, _ := pidlegetSpinning(0)
3679 if pp != nil {
3680 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3681 unlock(&sched.lock)
3682 if gp == nil {
3683 throw("global runq empty with non-zero runqsize")
3684 }
3685 if runqputbatch(pp, &q); !q.empty() {
3686 throw("Couldn't put Gs into empty local runq")
3687 }
3688 acquirep(pp)
3689 mp.becomeSpinning()
3690 return gp, false, false
3691 }
3692 }
3693 unlock(&sched.lock)
3694
3695 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3696 if pp != nil {
3697 acquirep(pp)
3698 mp.becomeSpinning()
3699 goto top
3700 }
3701
3702
3703 pp, gp := checkIdleGCNoP()
3704 if pp != nil {
3705 acquirep(pp)
3706 mp.becomeSpinning()
3707
3708
3709 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3710 trace := traceAcquire()
3711 casgstatus(gp, _Gwaiting, _Grunnable)
3712 if trace.ok() {
3713 trace.GoUnpark(gp, 0)
3714 traceRelease(trace)
3715 }
3716 return gp, false, false
3717 }
3718
3719
3720
3721
3722
3723
3724
3725 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3726 }
3727
3728
3729
3730
3731
3732 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3733 sched.pollUntil.Store(pollUntil)
3734 if mp.p != 0 {
3735 throw("findRunnable: netpoll with p")
3736 }
3737 if mp.spinning {
3738 throw("findRunnable: netpoll with spinning")
3739 }
3740 delay := int64(-1)
3741 if pollUntil != 0 {
3742 if now == 0 {
3743 now = nanotime()
3744 }
3745 delay = pollUntil - now
3746 if delay < 0 {
3747 delay = 0
3748 }
3749 }
3750 if faketime != 0 {
3751
3752 delay = 0
3753 }
3754 list, delta := netpoll(delay)
3755
3756 now = nanotime()
3757 sched.pollUntil.Store(0)
3758 sched.lastpoll.Store(now)
3759 if faketime != 0 && list.empty() {
3760
3761
3762 stopm()
3763 goto top
3764 }
3765 lock(&sched.lock)
3766 pp, _ := pidleget(now)
3767 unlock(&sched.lock)
3768 if pp == nil {
3769 injectglist(&list)
3770 netpollAdjustWaiters(delta)
3771 } else {
3772 acquirep(pp)
3773 if !list.empty() {
3774 gp := list.pop()
3775 injectglist(&list)
3776 netpollAdjustWaiters(delta)
3777 trace := traceAcquire()
3778 casgstatus(gp, _Gwaiting, _Grunnable)
3779 if trace.ok() {
3780 trace.GoUnpark(gp, 0)
3781 traceRelease(trace)
3782 }
3783 return gp, false, false
3784 }
3785 if wasSpinning {
3786 mp.becomeSpinning()
3787 }
3788 goto top
3789 }
3790 } else if pollUntil != 0 && netpollinited() {
3791 pollerPollUntil := sched.pollUntil.Load()
3792 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3793 netpollBreak()
3794 }
3795 }
3796 stopm()
3797 goto top
3798 }
3799
3800
3801
3802
3803
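// pollWork reports whether there is non-background work this P could
// be doing: a goroutine on the global or local run queue, or ready
// network poller events. It is a lightweight check used by background
// loops and only looks at a subset of what findRunnable considers.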
3804 func pollWork() bool {
3805 if !sched.runq.empty() {
3806 return true
3807 }
3808 p := getg().m.p.ptr()
3809 if !runqempty(p) {
3810 return true
3811 }
3812 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3813 if list, delta := netpoll(0); !list.empty() {
3814 injectglist(&list)
3815 netpollAdjustWaiters(delta)
3816 return true
3817 }
3818 }
3819 return false
3820 }
3821
3822
3823
3824
3825
3826
3827
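// stealWork attempts to steal a runnable goroutine or expired timer
// from any other P. It returns the stolen goroutine (if any) together
// with the updated now and pollUntil times. newWork reports that new
// work may have been readied while scanning, so the caller should
// retry findRunnable even if no goroutine was returned.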
3828 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3829 pp := getg().m.p.ptr()
3830
3831 ranTimer := false
3832
3833 const stealTries = 4
3834 for i := 0; i < stealTries; i++ {
3835 stealTimersOrRunNextG := i == stealTries-1
3836
3837 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3838 if sched.gcwaiting.Load() {
3839
3840 return nil, false, now, pollUntil, true
3841 }
3842 p2 := allp[enum.position()]
3843 if pp == p2 {
3844 continue
3845 }
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3861 tnow, w, ran := p2.timers.check(now, nil)
3862 now = tnow
3863 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3864 pollUntil = w
3865 }
3866 if ran {
3867
3868
3869
3870
3871
3872
3873
3874
3875 if gp, inheritTime := runqget(pp); gp != nil {
3876 return gp, inheritTime, now, pollUntil, ranTimer
3877 }
3878 ranTimer = true
3879 }
3880 }
3881
3882
3883 if !idlepMask.read(enum.position()) {
3884 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3885 return gp, false, now, pollUntil, ranTimer
3886 }
3887 }
3888 }
3889 }
3890
3891
3892
3893
3894 return nil, false, now, pollUntil, ranTimer
3895 }
3896
3897
3898
3899
3900
3901
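// checkRunqsNoP checks the snapshot of all Ps for run queues that
// received work after this M released its P. If such a P exists, it
// returns an idle P for the caller to acquire (so the work can be
// stolen); otherwise it returns nil. Called with no P held.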
3902 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3903 for id, p2 := range allpSnapshot {
3904 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3905 lock(&sched.lock)
3906 pp, _ := pidlegetSpinning(0)
3907 if pp == nil {
3908
3909 unlock(&sched.lock)
3910 return nil
3911 }
3912 unlock(&sched.lock)
3913 return pp
3914 }
3915 }
3916
3917
3918 return nil
3919 }
3920
3921
3922
3923
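// checkTimersNoP walks the timer heaps of the Ps in the snapshot
// (restricted by timerpMaskSnapshot) and returns the earliest wake
// time, folded into pollUntil. Called with no P held.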
3924 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3925 for id, p2 := range allpSnapshot {
3926 if timerpMaskSnapshot.read(uint32(id)) {
3927 w := p2.timers.wakeTime()
3928 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3929 pollUntil = w
3930 }
3931 }
3932 }
3933
3934 return pollUntil
3935 }
3936
3937
3938
3939
3940
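// checkIdleGCNoP checks whether an idle-priority GC mark worker should
// run and, if so, acquires an idle P and a worker goroutine for it.
// It returns (nil, nil) if no worker is needed or no P is available.
// Called with no P held.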
3941 func checkIdleGCNoP() (*p, *g) {
3942
3943
3944
3945
3946
3947
3948 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3949 return nil, nil
3950 }
3951 if !gcShouldScheduleWorker(nil) {
3952 return nil, nil
3953 }
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972 lock(&sched.lock)
3973 pp, now := pidlegetSpinning(0)
3974 if pp == nil {
3975 unlock(&sched.lock)
3976 return nil, nil
3977 }
3978
3979
3980 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3981 pidleput(pp, now)
3982 unlock(&sched.lock)
3983 return nil, nil
3984 }
3985
3986 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3987 if node == nil {
3988 pidleput(pp, now)
3989 unlock(&sched.lock)
3990 gcController.removeIdleMarkWorker()
3991 return nil, nil
3992 }
3993
3994 unlock(&sched.lock)
3995
3996 return pp, node.gp.ptr()
3997 }
3998
3999
4000
4001
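// wakeNetPoller ensures the network poller wakes no later than when.
// If a thread is currently blocked in netpoll with a later deadline it
// is interrupted via netpollBreak; otherwise an idle P/M is woken so
// some thread will notice the new timer.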
4002 func wakeNetPoller(when int64) {
4003 if sched.lastpoll.Load() == 0 {
4004
4005
4006
4007
4008 pollerPollUntil := sched.pollUntil.Load()
4009 if pollerPollUntil == 0 || pollerPollUntil > when {
4010 netpollBreak()
4011 }
4012 } else {
4013
4014
4015 if GOOS != "plan9" {
4016 wakep()
4017 }
4018 }
4019 }
4020
4021 func resetspinning() {
4022 gp := getg()
4023 if !gp.m.spinning {
4024 throw("resetspinning: not a spinning m")
4025 }
4026 gp.m.spinning = false
4027 nmspinning := sched.nmspinning.Add(-1)
4028 if nmspinning < 0 {
4029 throw("findRunnable: negative nmspinning")
4030 }
4031
4032
4033
4034 wakep()
4035 }
4036
4037
4038
4039
4040
4041
4042
4043
4044
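// injectglist makes every goroutine on glist runnable and queues it to
// run, emptying glist. With no current P, everything goes on the
// global run queue and idle Ps are started to service it. Otherwise,
// one goroutine per idle P goes to the global queue (starting Ms to
// match) and the remainder goes on the current P's local run queue.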
4045 func injectglist(glist *gList) {
4046 if glist.empty() {
4047 return
4048 }
4049
4050
4051
4052 var tail *g
4053 trace := traceAcquire()
4054 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4055 tail = gp
4056 casgstatus(gp, _Gwaiting, _Grunnable)
4057 if trace.ok() {
4058 trace.GoUnpark(gp, 0)
4059 }
4060 }
4061 if trace.ok() {
4062 traceRelease(trace)
4063 }
4064
4065
4066 q := gQueue{glist.head, tail.guintptr(), glist.size}
4067 *glist = gList{}
4068
4069 startIdle := func(n int32) {
4070 for ; n > 0; n-- {
4071 mp := acquirem()
4072 lock(&sched.lock)
4073
4074 pp, _ := pidlegetSpinning(0)
4075 if pp == nil {
4076 unlock(&sched.lock)
4077 releasem(mp)
4078 break
4079 }
4080
4081 startm(pp, false, true)
4082 unlock(&sched.lock)
4083 releasem(mp)
4084 }
4085 }
4086
4087 pp := getg().m.p.ptr()
4088 if pp == nil {
4089 n := q.size
4090 lock(&sched.lock)
4091 globrunqputbatch(&q)
4092 unlock(&sched.lock)
4093 startIdle(n)
4094 return
4095 }
4096
4097 var globq gQueue
4098 npidle := sched.npidle.Load()
4099 for ; npidle > 0 && !q.empty(); npidle-- {
4100 g := q.pop()
4101 globq.pushBack(g)
4102 }
4103 if !globq.empty() {
4104 n := globq.size
4105 lock(&sched.lock)
4106 globrunqputbatch(&globq)
4107 unlock(&sched.lock)
4108 startIdle(n)
4109 }
4110
4111 if runqputbatch(pp, &q); !q.empty() {
4112 lock(&sched.lock)
4113 globrunqputbatch(&q)
4114 unlock(&sched.lock)
4115 }
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130 wakep()
4131 }
4132
4133
4134
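// schedule performs one round of scheduling: it finds a runnable
// goroutine and executes it on the current M. It never returns.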
4135 func schedule() {
4136 mp := getg().m
4137
4138 if mp.locks != 0 {
4139 throw("schedule: holding locks")
4140 }
4141
4142 if mp.lockedg != 0 {
4143 stoplockedm()
4144 execute(mp.lockedg.ptr(), false)
4145 }
4146
4147
4148
4149 if mp.incgo {
4150 throw("schedule: in cgo")
4151 }
4152
4153 top:
4154 pp := mp.p.ptr()
4155 pp.preempt = false
4156
4157
4158
4159
4160 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4161 throw("schedule: spinning with local work")
4162 }
4163
4164 gp, inheritTime, tryWakeP := findRunnable()
4165
4166
4167 pp = mp.p.ptr()
4168
4169
4170
4171
4172 mp.clearAllpSnapshot()
4173
4174
4175
4176
4177
4178
4179
4180
4181 gcController.releaseNextGCMarkWorker(pp)
4182
4183 if debug.dontfreezetheworld > 0 && freezing.Load() {
4184
4185
4186
4187
4188
4189
4190
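// With GODEBUG=dontfreezetheworld set, a crashing process leaves other
// Ms running. Park this M permanently by self-deadlocking on a private
// mutex so it stops scheduling new work while the crash is reported.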
4191 lock(&deadlock)
4192 lock(&deadlock)
4193 }
4194
4195
4196
4197
4198 if mp.spinning {
4199 resetspinning()
4200 }
4201
4202 if sched.disable.user && !schedEnabled(gp) {
4203
4204
4205
4206 lock(&sched.lock)
4207 if schedEnabled(gp) {
4208
4209
4210 unlock(&sched.lock)
4211 } else {
4212 sched.disable.runnable.pushBack(gp)
4213 unlock(&sched.lock)
4214 goto top
4215 }
4216 }
4217
4218
4219
4220 if tryWakeP {
4221 wakep()
4222 }
4223 if gp.lockedm != 0 {
4224
4225
4226 startlockedm(gp)
4227 goto top
4228 }
4229
4230 execute(gp, inheritTime)
4231 }
4232
4233
4234
4235
4236
4237
4238
4239
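// dropg removes the association between the current M and its current
// goroutine (m.curg). Typically the caller first moves the goroutine
// out of _Grunning and then calls dropg to finish the job.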
4240 func dropg() {
4241 gp := getg()
4242
4243 setMNoWB(&gp.m.curg.m, nil)
4244 setGNoWB(&gp.m.curg, nil)
4245 }
4246
4247 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4248 unlock((*mutex)(lock))
4249 return true
4250 }
4251
4252
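// park_m is the continuation of gopark, running on g0: it moves gp to
// _Gwaiting, drops it from this M, and runs the optional waitunlockf.
// If that function returns false the park is abandoned and gp is
// executed again immediately; otherwise the M schedules other work.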
4253 func park_m(gp *g) {
4254 mp := getg().m
4255
4256 trace := traceAcquire()
4257
4258
4259
4260
4261
4262 bubble := gp.bubble
4263 if bubble != nil {
4264 bubble.incActive()
4265 }
4266
4267 if trace.ok() {
4268
4269
4270
4271 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4272 }
4273
4274
4275 casgstatus(gp, _Grunning, _Gwaiting)
4276 if trace.ok() {
4277 traceRelease(trace)
4278 }
4279
4280 dropg()
4281
4282 if fn := mp.waitunlockf; fn != nil {
4283 ok := fn(gp, mp.waitlock)
4284 mp.waitunlockf = nil
4285 mp.waitlock = nil
4286 if !ok {
4287 trace := traceAcquire()
4288 casgstatus(gp, _Gwaiting, _Grunnable)
4289 if bubble != nil {
4290 bubble.decActive()
4291 }
4292 if trace.ok() {
4293 trace.GoUnpark(gp, 2)
4294 traceRelease(trace)
4295 }
4296 execute(gp, true)
4297 }
4298 }
4299
4300 if bubble != nil {
4301 bubble.decActive()
4302 }
4303
4304 schedule()
4305 }
4306
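// goschedImpl yields the processor: gp goes back on a run queue (the
// local queue if it was preempted for a pending stop-the-world,
// otherwise the global queue) and the M schedules other work.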
4307 func goschedImpl(gp *g, preempted bool) {
4308 pp := gp.m.p.ptr()
4309 trace := traceAcquire()
4310 status := readgstatus(gp)
4311 if status&^_Gscan != _Grunning {
4312 dumpgstatus(gp)
4313 throw("bad g status")
4314 }
4315 if trace.ok() {
4316
4317
4318
4319 if preempted {
4320 trace.GoPreempt()
4321 } else {
4322 trace.GoSched()
4323 }
4324 }
4325 casgstatus(gp, _Grunning, _Grunnable)
4326 if trace.ok() {
4327 traceRelease(trace)
4328 }
4329
4330 dropg()
4331 if preempted && sched.gcwaiting.Load() {
4332
4333
4334 runqput(pp, gp, true)
4335 } else {
4336 lock(&sched.lock)
4337 globrunqput(gp)
4338 unlock(&sched.lock)
4339 }
4340
4341 if mainStarted {
4342 wakep()
4343 }
4344
4345 schedule()
4346 }
4347
4348
4349 func gosched_m(gp *g) {
4350 goschedImpl(gp, false)
4351 }
4352
4353
4354 func goschedguarded_m(gp *g) {
4355 if !canPreemptM(gp.m) {
4356 gogo(&gp.sched)
4357 }
4358 goschedImpl(gp, false)
4359 }
4360
4361 func gopreempt_m(gp *g) {
4362 goschedImpl(gp, true)
4363 }
4364
4365
4366
4367
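// preemptPark parks gp at an asynchronous preemption point and moves
// it to _Gpreempted. It runs on g0; whoever requested the preemption
// (for example the GC stack scanner) is responsible for making gp
// runnable again.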
4368 func preemptPark(gp *g) {
4369 status := readgstatus(gp)
4370 if status&^_Gscan != _Grunning {
4371 dumpgstatus(gp)
4372 throw("bad g status")
4373 }
4374
4375 if gp.asyncSafePoint {
4376
4377
4378
4379 f := findfunc(gp.sched.pc)
4380 if !f.valid() {
4381 throw("preempt at unknown pc")
4382 }
4383 if f.flag&abi.FuncFlagSPWrite != 0 {
4384 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4385 throw("preempt SPWRITE")
4386 }
4387 }
4388
4389
4390
4391
4392
4393
4394
4395 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417 trace := traceAcquire()
4418 if trace.ok() {
4419 trace.GoPark(traceBlockPreempted, 0)
4420 }
4421
4422
4423
4424
4425 dropg()
4426
4427
4428 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4429 if trace.ok() {
4430 traceRelease(trace)
4431 }
4432
4433
4434 schedule()
4435 }
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451 func goyield() {
4452 checkTimeouts()
4453 mcall(goyield_m)
4454 }
4455
4456 func goyield_m(gp *g) {
4457 trace := traceAcquire()
4458 pp := gp.m.p.ptr()
4459 if trace.ok() {
4460
4461
4462
4463 trace.GoPreempt()
4464 }
4465 casgstatus(gp, _Grunning, _Grunnable)
4466 if trace.ok() {
4467 traceRelease(trace)
4468 }
4469 dropg()
4470 runqput(pp, gp, false)
4471 schedule()
4472 }
4473
4474
4475 func goexit1() {
4476 if raceenabled {
4477 if gp := getg(); gp.bubble != nil {
4478 racereleasemergeg(gp, gp.bubble.raceaddr())
4479 }
4480 racegoend()
4481 }
4482 trace := traceAcquire()
4483 if trace.ok() {
4484 trace.GoEnd()
4485 traceRelease(trace)
4486 }
4487 mcall(goexit0)
4488 }
4489
4490
4491 func goexit0(gp *g) {
4492 if goexperiment.RuntimeSecret && gp.secret > 0 {
4493
4494
4495 memclrNoHeapPointers(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4496
4497
4498 }
4499 gdestroy(gp)
4500 schedule()
4501 }
4502
4503 func gdestroy(gp *g) {
4504 mp := getg().m
4505 pp := mp.p.ptr()
4506
4507 casgstatus(gp, _Grunning, _Gdead)
4508 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4509 if isSystemGoroutine(gp, false) {
4510 sched.ngsys.Add(-1)
4511 }
4512 gp.m = nil
4513 locked := gp.lockedm != 0
4514 gp.lockedm = 0
4515 mp.lockedg = 0
4516 gp.preemptStop = false
4517 gp.paniconfault = false
4518 gp._defer = nil
4519 gp._panic = nil
4520 gp.writebuf = nil
4521 gp.waitreason = waitReasonZero
4522 gp.param = nil
4523 gp.labels = nil
4524 gp.timer = nil
4525 gp.bubble = nil
4526 gp.fipsOnlyBypass = false
4527 gp.secret = 0
4528
4529 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4530
4531
4532
4533 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4534 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4535 gcController.bgScanCredit.Add(scanCredit)
4536 gp.gcAssistBytes = 0
4537 }
4538
4539 dropg()
4540
4541 if GOARCH == "wasm" {
4542 gfput(pp, gp)
4543 return
4544 }
4545
4546 if locked && mp.lockedInt != 0 {
4547 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4548 if mp.isextra {
4549 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4550 }
4551 throw("exited a goroutine internally locked to the OS thread")
4552 }
4553 gfput(pp, gp)
4554 if locked {
4555
4556
4557
4558
4559
4560
4561 if GOOS != "plan9" {
4562 gogo(&mp.g0.sched)
4563 } else {
4564
4565
4566 mp.lockedExt = 0
4567 }
4568 }
4569 }
4570
4571
4572
4573
4574
4575
4576
4577
4578
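// save records pc, sp, and bp in gp.sched so that a later gogo can
// resume execution at this point. It must not be called on g0 or
// gsignal, whose scheduling state is managed separately.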
4579 func save(pc, sp, bp uintptr) {
4580 gp := getg()
4581
4582 if gp == gp.m.g0 || gp == gp.m.gsignal {
4583
4584
4585
4586
4587
4588 throw("save on system g not allowed")
4589 }
4590
4591 gp.sched.pc = pc
4592 gp.sched.sp = sp
4593 gp.sched.lr = 0
4594 gp.sched.bp = bp
4595
4596
4597
4598 if gp.sched.ctxt != nil {
4599 badctxt()
4600 }
4601 }
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
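// reentersyscall is the common entry path for syscalls: it saves the
// caller's PC/SP/BP, remembers the current P in m.oldp, and moves the
// goroutine to _Gsyscall without giving up the P, so a fast syscall
// can return without rescheduling. Nothing between here and the
// matching exitsyscall may split the stack, which is why gp.sched is
// re-saved after every call that might clobber it.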
4627 func reentersyscall(pc, sp, bp uintptr) {
4628 gp := getg()
4629
4630
4631
4632 gp.m.locks++
4633
4634
4635
4636
4637
4638 gp.stackguard0 = stackPreempt
4639 gp.throwsplit = true
4640
4641
4642 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4643
4644 pp := gp.m.p.ptr()
4645 if pp.runSafePointFn != 0 {
4646
4647 systemstack(runSafePointFn)
4648 }
4649 gp.m.oldp.set(pp)
4650
4651
4652 save(pc, sp, bp)
4653 gp.syscallsp = sp
4654 gp.syscallpc = pc
4655 gp.syscallbp = bp
4656
4657
4658 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4659 systemstack(func() {
4660 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4661 throw("entersyscall")
4662 })
4663 }
4664 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4665 systemstack(func() {
4666 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4667 throw("entersyscall")
4668 })
4669 }
4670 trace := traceAcquire()
4671 if trace.ok() {
4672
4673
4674
4675
4676 systemstack(func() {
4677 trace.GoSysCall()
4678 })
4679
4680 save(pc, sp, bp)
4681 }
4682 if sched.gcwaiting.Load() {
4683
4684
4685
4686 systemstack(func() {
4687 entersyscallHandleGCWait(trace)
4688 })
4689
4690 save(pc, sp, bp)
4691 }
4692
4693
4694
4695
4696
4697 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Grunning, _Gsyscall) {
4698 casgstatus(gp, _Grunning, _Gsyscall)
4699 }
4700 if staticLockRanking {
4701
4702 save(pc, sp, bp)
4703 }
4704 if trace.ok() {
4705
4706
4707
4708 traceRelease(trace)
4709 }
4710 if sched.sysmonwait.Load() {
4711 systemstack(entersyscallWakeSysmon)
4712
4713 save(pc, sp, bp)
4714 }
4715 gp.m.locks--
4716 }
4717
4718
4719
4720
4721 const debugExtendGrunningNoP = false
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737 func entersyscall() {
4738
4739
4740
4741
4742 fp := getcallerfp()
4743 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4744 }
4745
4746 func entersyscallWakeSysmon() {
4747 lock(&sched.lock)
4748 if sched.sysmonwait.Load() {
4749 sched.sysmonwait.Store(false)
4750 notewakeup(&sched.sysmonnote)
4751 }
4752 unlock(&sched.lock)
4753 }
4754
4755 func entersyscallHandleGCWait(trace traceLocker) {
4756 gp := getg()
4757
4758 lock(&sched.lock)
4759 if sched.stopwait > 0 {
4760
4761 pp := gp.m.p.ptr()
4762 pp.m = 0
4763 gp.m.p = 0
4764 atomic.Store(&pp.status, _Pgcstop)
4765
4766 if trace.ok() {
4767 trace.ProcStop(pp)
4768 }
4769 addGSyscallNoP(gp.m)
4770 pp.gcStopTime = nanotime()
4771 pp.syscalltick++
4772 if sched.stopwait--; sched.stopwait == 0 {
4773 notewakeup(&sched.stopnote)
4774 }
4775 }
4776 unlock(&sched.lock)
4777 }
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
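// entersyscallblock is the variant of entersyscall used for calls that
// are known to block for a long time: instead of keeping the P, it
// hands it off to another M immediately.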
4791 func entersyscallblock() {
4792 gp := getg()
4793
4794 gp.m.locks++
4795 gp.throwsplit = true
4796 gp.stackguard0 = stackPreempt
4797 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4798 gp.m.p.ptr().syscalltick++
4799
4800 addGSyscallNoP(gp.m)
4801
4802
4803 pc := sys.GetCallerPC()
4804 sp := sys.GetCallerSP()
4805 bp := getcallerfp()
4806 save(pc, sp, bp)
4807 gp.syscallsp = gp.sched.sp
4808 gp.syscallpc = gp.sched.pc
4809 gp.syscallbp = gp.sched.bp
4810 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4811 sp1 := sp
4812 sp2 := gp.sched.sp
4813 sp3 := gp.syscallsp
4814 systemstack(func() {
4815 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4816 throw("entersyscallblock")
4817 })
4818 }
4819
4820
4821
4822
4823
4824
4825 trace := traceAcquire()
4826 systemstack(func() {
4827 if trace.ok() {
4828 trace.GoSysCall()
4829 }
4830 handoffp(releasep())
4831 })
4832
4833
4834
4835 if debugExtendGrunningNoP {
4836 usleep(10)
4837 }
4838 casgstatus(gp, _Grunning, _Gsyscall)
4839 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4840 systemstack(func() {
4841 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4842 throw("entersyscallblock")
4843 })
4844 }
4845 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4846 systemstack(func() {
4847 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4848 throw("entersyscallblock")
4849 })
4850 }
4851 if trace.ok() {
4852 systemstack(func() {
4853 traceRelease(trace)
4854 })
4855 }
4856
4857
4858 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4859
4860 gp.m.locks--
4861 }
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
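// exitsyscall is called when a goroutine returns from a system call.
// On the fast path the M still owns a P (or can win back the one saved
// in m.oldp, or grab an idle one) and simply keeps running. Otherwise
// it falls back to exitsyscallNoP via mcall, which queues the
// goroutine and parks the M.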
4883 func exitsyscall() {
4884 gp := getg()
4885
4886 gp.m.locks++
4887 if sys.GetCallerSP() > gp.syscallsp {
4888 throw("exitsyscall: syscall frame is no longer valid")
4889 }
4890 gp.waitsince = 0
4891
4892 if sched.stopwait == freezeStopWait {
4893
4894
4895
4896 systemstack(func() {
4897 lock(&deadlock)
4898 lock(&deadlock)
4899 })
4900 }
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913 if gp.bubble != nil || !gp.atomicstatus.CompareAndSwap(_Gsyscall, _Grunning) {
4914 casgstatus(gp, _Gsyscall, _Grunning)
4915 }
4916
4917
4918
4919
4920 if debugExtendGrunningNoP {
4921 usleep(10)
4922 }
4923
4924
4925 oldp := gp.m.oldp.ptr()
4926 gp.m.oldp.set(nil)
4927
4928
4929 pp := gp.m.p.ptr()
4930 if pp != nil {
4931
4932 if trace := traceAcquire(); trace.ok() {
4933 systemstack(func() {
4934
4935
4936
4937
4938
4939
4940
4941
4942 if pp.syscalltick == gp.m.syscalltick {
4943 trace.GoSysExit(false)
4944 } else {
4945
4946
4947
4948
4949 trace.ProcSteal(pp)
4950 trace.ProcStart()
4951 trace.GoSysExit(true)
4952 trace.GoStart()
4953 }
4954 traceRelease(trace)
4955 })
4956 }
4957 } else {
4958
4959 systemstack(func() {
4960
4961 if pp := exitsyscallTryGetP(oldp); pp != nil {
4962
4963 acquirepNoTrace(pp)
4964
4965
4966 if trace := traceAcquire(); trace.ok() {
4967 trace.ProcStart()
4968 trace.GoSysExit(true)
4969 trace.GoStart()
4970 traceRelease(trace)
4971 }
4972 }
4973 })
4974 pp = gp.m.p.ptr()
4975 }
4976
4977
4978 if pp != nil {
4979 if goroutineProfile.active {
4980
4981
4982
4983 systemstack(func() {
4984 tryRecordGoroutineProfileWB(gp)
4985 })
4986 }
4987
4988
4989 pp.syscalltick++
4990
4991
4992
4993 gp.syscallsp = 0
4994 gp.m.locks--
4995 if gp.preempt {
4996
4997 gp.stackguard0 = stackPreempt
4998 } else {
4999
5000 gp.stackguard0 = gp.stack.lo + stackGuard
5001 }
5002 gp.throwsplit = false
5003
5004 if sched.disable.user && !schedEnabled(gp) {
5005
5006 Gosched()
5007 }
5008 return
5009 }
5010
5011 gp.m.locks--
5012
5013
5014 mcall(exitsyscallNoP)
5015
5016
5017
5018
5019
5020
5021
5022 gp.syscallsp = 0
5023 gp.m.p.ptr().syscalltick++
5024 gp.throwsplit = false
5025 }
5026
5027
5028
5029
5030
5031
5032
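// exitsyscallTryGetP tries to find a P for a goroutine leaving a
// syscall: it first tries to take back the P it held before the call
// (pinning the thread that currently owns it, which must itself be in
// a syscall, and taking the P from it), and otherwise takes a P from
// the idle list. It returns nil if neither succeeds.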
5033 func exitsyscallTryGetP(oldp *p) *p {
5034
5035 if oldp != nil {
5036 if thread, ok := setBlockOnExitSyscall(oldp); ok {
5037 thread.takeP()
5038 decGSyscallNoP(getg().m)
5039 thread.resume()
5040 return oldp
5041 }
5042 }
5043
5044
5045 if sched.pidle != 0 {
5046 lock(&sched.lock)
5047 pp, _ := pidleget(0)
5048 if pp != nil && sched.sysmonwait.Load() {
5049 sched.sysmonwait.Store(false)
5050 notewakeup(&sched.sysmonnote)
5051 }
5052 unlock(&sched.lock)
5053 if pp != nil {
5054 decGSyscallNoP(getg().m)
5055 return pp
5056 }
5057 }
5058 return nil
5059 }
5060
5061
5062
5063
5064
5065
5066
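// exitsyscallNoP is the slow path of exitsyscall, running on g0 after
// no P could be acquired: the goroutine is made runnable and either
// run on a freshly acquired idle P, handed to its locked M, or placed
// on the global run queue while this M stops.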
5067 func exitsyscallNoP(gp *g) {
5068 traceExitingSyscall()
5069 trace := traceAcquire()
5070 casgstatus(gp, _Grunning, _Grunnable)
5071 traceExitedSyscall()
5072 if trace.ok() {
5073
5074
5075
5076
5077 trace.GoSysExit(true)
5078 traceRelease(trace)
5079 }
5080 decGSyscallNoP(getg().m)
5081 dropg()
5082 lock(&sched.lock)
5083 var pp *p
5084 if schedEnabled(gp) {
5085 pp, _ = pidleget(0)
5086 }
5087 var locked bool
5088 if pp == nil {
5089 globrunqput(gp)
5090
5091
5092
5093
5094
5095
5096 locked = gp.lockedm != 0
5097 } else if sched.sysmonwait.Load() {
5098 sched.sysmonwait.Store(false)
5099 notewakeup(&sched.sysmonnote)
5100 }
5101 unlock(&sched.lock)
5102 if pp != nil {
5103 acquirep(pp)
5104 execute(gp, false)
5105 }
5106 if locked {
5107
5108
5109
5110
5111 stoplockedm()
5112 execute(gp, false)
5113 }
5114 stopm()
5115 schedule()
5116 }
5117
5118
5119
5120
5121
5122
5123
5124 func addGSyscallNoP(mp *m) {
5125
5126
5127
5128 if !mp.isExtraInC {
5129
5130
5131
5132
5133
5134 sched.nGsyscallNoP.Add(1)
5135 }
5136 }
5137
5138
5139
5140
5141
5142
5143
5144 func decGSyscallNoP(mp *m) {
5145
5146
5147
5148 if !mp.isExtraInC {
5149 sched.nGsyscallNoP.Add(-1)
5150 }
5151 }
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165 func syscall_runtime_BeforeFork() {
5166 gp := getg().m.curg
5167
5168
5169
5170
5171 gp.m.locks++
5172 sigsave(&gp.m.sigmask)
5173 sigblock(false)
5174
5175
5176
5177
5178
5179 gp.stackguard0 = stackFork
5180 }
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194 func syscall_runtime_AfterFork() {
5195 gp := getg().m.curg
5196
5197
5198 gp.stackguard0 = gp.stack.lo + stackGuard
5199
5200 msigrestore(gp.m.sigmask)
5201
5202 gp.m.locks--
5203 }
5204
5205
5206
5207 var inForkedChild bool
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228 func syscall_runtime_AfterForkInChild() {
5229
5230
5231
5232
5233 inForkedChild = true
5234
5235 clearSignalHandlers()
5236
5237
5238
5239 msigrestore(getg().m.sigmask)
5240
5241 inForkedChild = false
5242 }
5243
5244
5245
5246
5247 var pendingPreemptSignals atomic.Int32
5248
5249
5250
5251
5252 func syscall_runtime_BeforeExec() {
5253
5254 execLock.lock()
5255
5256
5257
5258 if GOOS == "darwin" || GOOS == "ios" {
5259 for pendingPreemptSignals.Load() > 0 {
5260 osyield()
5261 }
5262 }
5263 }
5264
5265
5266
5267
5268 func syscall_runtime_AfterExec() {
5269 execLock.unlock()
5270 }
5271
5272
5273 func malg(stacksize int32) *g {
5274 newg := new(g)
5275 if stacksize >= 0 {
5276 stacksize = round2(stackSystem + stacksize)
5277 systemstack(func() {
5278 newg.stack = stackalloc(uint32(stacksize))
5279 if valgrindenabled {
5280 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5281 }
5282 })
5283 newg.stackguard0 = newg.stack.lo + stackGuard
5284 newg.stackguard1 = ^uintptr(0)
5285
5286
5287 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5288 }
5289 return newg
5290 }
5291
5292
5293
5294
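// newproc creates a new goroutine running fn, puts it on the current
// P's run queue, and wakes another P if one is idle. The compiler
// lowers a go statement such as "go f()" into a call to newproc with a
// closure for f.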
5295 func newproc(fn *funcval) {
5296 gp := getg()
5297 pc := sys.GetCallerPC()
5298 systemstack(func() {
5299 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5300
5301 pp := getg().m.p.ptr()
5302 runqput(pp, newg, true)
5303
5304 if mainStarted {
5305 wakep()
5306 }
5307 })
5308 }
5309
5310
5311
5312
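// newproc1 allocates (or reuses from the free list) a g, builds an
// initial frame so the goroutine starts at fn and returns into goexit,
// assigns it a goid, and returns it in _Grunnable (or _Gwaiting when
// parked is set). Queuing the new goroutine is the caller's job.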
5313 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5314 if fn == nil {
5315 fatal("go of nil func value")
5316 }
5317
5318 mp := acquirem()
5319 pp := mp.p.ptr()
5320 newg := gfget(pp)
5321 if newg == nil {
5322 newg = malg(stackMin)
5323 casgstatus(newg, _Gidle, _Gdead)
5324 allgadd(newg)
5325 }
5326 if newg.stack.hi == 0 {
5327 throw("newproc1: newg missing stack")
5328 }
5329
5330 if readgstatus(newg) != _Gdead {
5331 throw("newproc1: new g is not Gdead")
5332 }
5333
5334 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5335 totalSize = alignUp(totalSize, sys.StackAlign)
5336 sp := newg.stack.hi - totalSize
5337 if usesLR {
5338
5339 *(*uintptr)(unsafe.Pointer(sp)) = 0
5340 prepGoExitFrame(sp)
5341 }
5342 if GOARCH == "arm64" {
5343
5344 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5345 }
5346
5347 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5348 newg.sched.sp = sp
5349 newg.stktopsp = sp
5350 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5351 newg.sched.g = guintptr(unsafe.Pointer(newg))
5352 gostartcallfn(&newg.sched, fn)
5353 newg.parentGoid = callergp.goid
5354 newg.gopc = callerpc
5355 newg.ancestors = saveAncestors(callergp)
5356 newg.startpc = fn.fn
5357 newg.runningCleanups.Store(false)
5358 if isSystemGoroutine(newg, false) {
5359 sched.ngsys.Add(1)
5360 } else {
5361
5362 newg.bubble = callergp.bubble
5363 if mp.curg != nil {
5364 newg.labels = mp.curg.labels
5365 }
5366 if goroutineProfile.active {
5367
5368
5369
5370
5371
5372 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5373 }
5374 }
5375
5376 newg.trackingSeq = uint8(cheaprand())
5377 if newg.trackingSeq%gTrackingPeriod == 0 {
5378 newg.tracking = true
5379 }
5380 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5381
5382
5383
5384 trace := traceAcquire()
5385 var status uint32 = _Grunnable
5386 if parked {
5387 status = _Gwaiting
5388 newg.waitreason = waitreason
5389 }
5390 if pp.goidcache == pp.goidcacheend {
5391
5392
5393
5394 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5395 pp.goidcache -= _GoidCacheBatch - 1
5396 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5397 }
5398 newg.goid = pp.goidcache
5399 casgstatus(newg, _Gdead, status)
5400 pp.goidcache++
5401 newg.trace.reset()
5402 if trace.ok() {
5403 trace.GoCreate(newg, newg.startpc, parked)
5404 traceRelease(trace)
5405 }
5406
5407
5408 newg.fipsOnlyBypass = callergp.fipsOnlyBypass
5409
5410
5411 newg.ditWanted = callergp.ditWanted
5412
5413
5414 if raceenabled {
5415 newg.racectx = racegostart(callerpc)
5416 newg.raceignore = 0
5417 if newg.labels != nil {
5418
5419
5420 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5421 }
5422 }
5423 pp.goroutinesCreated++
5424 releasem(mp)
5425
5426 return newg
5427 }
5428
5429
5430
5431
5432 func saveAncestors(callergp *g) *[]ancestorInfo {
5433
5434 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5435 return nil
5436 }
5437 var callerAncestors []ancestorInfo
5438 if callergp.ancestors != nil {
5439 callerAncestors = *callergp.ancestors
5440 }
5441 n := int32(len(callerAncestors)) + 1
5442 if n > debug.tracebackancestors {
5443 n = debug.tracebackancestors
5444 }
5445 ancestors := make([]ancestorInfo, n)
5446 copy(ancestors[1:], callerAncestors)
5447
5448 var pcs [tracebackInnerFrames]uintptr
5449 npcs := gcallers(callergp, 0, pcs[:])
5450 ipcs := make([]uintptr, npcs)
5451 copy(ipcs, pcs[:])
5452 ancestors[0] = ancestorInfo{
5453 pcs: ipcs,
5454 goid: callergp.goid,
5455 gopc: callergp.gopc,
5456 }
5457
5458 ancestorsp := new([]ancestorInfo)
5459 *ancestorsp = ancestors
5460 return ancestorsp
5461 }
5462
5463
5464
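// gfput puts a dead goroutine on the per-P free list, first freeing
// its stack if it is not the standard starting size. When the local
// list exceeds 64 entries, half of it is pushed to the global lists.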
5465 func gfput(pp *p, gp *g) {
5466 if readgstatus(gp) != _Gdead {
5467 throw("gfput: bad status (not Gdead)")
5468 }
5469
5470 stksize := gp.stack.hi - gp.stack.lo
5471
5472 if stksize != uintptr(startingStackSize) {
5473
5474 stackfree(gp.stack)
5475 gp.stack.lo = 0
5476 gp.stack.hi = 0
5477 gp.stackguard0 = 0
5478 if valgrindenabled {
5479 valgrindDeregisterStack(gp.valgrindStackID)
5480 gp.valgrindStackID = 0
5481 }
5482 }
5483
5484 pp.gFree.push(gp)
5485 if pp.gFree.size >= 64 {
5486 var (
5487 stackQ gQueue
5488 noStackQ gQueue
5489 )
5490 for pp.gFree.size >= 32 {
5491 gp := pp.gFree.pop()
5492 if gp.stack.lo == 0 {
5493 noStackQ.push(gp)
5494 } else {
5495 stackQ.push(gp)
5496 }
5497 }
5498 lock(&sched.gFree.lock)
5499 sched.gFree.noStack.pushAll(noStackQ)
5500 sched.gFree.stack.pushAll(stackQ)
5501 unlock(&sched.gFree.lock)
5502 }
5503 }
5504
5505
5506
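// gfget retrieves a goroutine from the per-P free list, refilling from
// the global lists when the local list is empty, and gives it a fresh
// starting-size stack if needed. It returns nil if no free g exists.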
5507 func gfget(pp *p) *g {
5508 retry:
5509 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5510 lock(&sched.gFree.lock)
5511
5512 for pp.gFree.size < 32 {
5513
5514 gp := sched.gFree.stack.pop()
5515 if gp == nil {
5516 gp = sched.gFree.noStack.pop()
5517 if gp == nil {
5518 break
5519 }
5520 }
5521 pp.gFree.push(gp)
5522 }
5523 unlock(&sched.gFree.lock)
5524 goto retry
5525 }
5526 gp := pp.gFree.pop()
5527 if gp == nil {
5528 return nil
5529 }
5530 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5531
5532
5533
5534 systemstack(func() {
5535 stackfree(gp.stack)
5536 gp.stack.lo = 0
5537 gp.stack.hi = 0
5538 gp.stackguard0 = 0
5539 if valgrindenabled {
5540 valgrindDeregisterStack(gp.valgrindStackID)
5541 gp.valgrindStackID = 0
5542 }
5543 })
5544 }
5545 if gp.stack.lo == 0 {
5546
5547 systemstack(func() {
5548 gp.stack = stackalloc(startingStackSize)
5549 if valgrindenabled {
5550 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5551 }
5552 })
5553 gp.stackguard0 = gp.stack.lo + stackGuard
5554 } else {
5555 if raceenabled {
5556 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5557 }
5558 if msanenabled {
5559 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5560 }
5561 if asanenabled {
5562 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5563 }
5564 }
5565 return gp
5566 }
5567
5568
5569 func gfpurge(pp *p) {
5570 var (
5571 stackQ gQueue
5572 noStackQ gQueue
5573 )
5574 for !pp.gFree.empty() {
5575 gp := pp.gFree.pop()
5576 if gp.stack.lo == 0 {
5577 noStackQ.push(gp)
5578 } else {
5579 stackQ.push(gp)
5580 }
5581 }
5582 lock(&sched.gFree.lock)
5583 sched.gFree.noStack.pushAll(noStackQ)
5584 sched.gFree.stack.pushAll(stackQ)
5585 unlock(&sched.gFree.lock)
5586 }
5587

// Breakpoint executes a breakpoint trap.
5589 func Breakpoint() {
5590 breakpoint()
5591 }
5592
5593
5594
5595
5596
5597
5598 func dolockOSThread() {
5599 if GOARCH == "wasm" {
5600 return
5601 }
5602 gp := getg()
5603 gp.m.lockedg.set(gp)
5604 gp.lockedm.set(gp.m)
5605 }
5606
5607
5608
5609
// LockOSThread wires the calling goroutine to its current operating
// system thread. The calling goroutine will always execute in that
// thread, and no other goroutine will execute in it, until the calling
// goroutine has made as many calls to UnlockOSThread as to
// LockOSThread. If the calling goroutine exits without unlocking the
// thread, the thread will be terminated.
//
// All init functions are run on the startup thread. Calling
// LockOSThread from an init function will cause the main function to
// be invoked on that thread.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
5623 func LockOSThread() {
5624 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5625
5626
5627
5628 startTemplateThread()
5629 }
5630 gp := getg()
5631 gp.m.lockedExt++
5632 if gp.m.lockedExt == 0 {
5633 gp.m.lockedExt--
5634 panic("LockOSThread nesting overflow")
5635 }
5636 dolockOSThread()
5637 }
5638
5639
5640 func lockOSThread() {
5641 getg().m.lockedInt++
5642 dolockOSThread()
5643 }
5644
5645
5646
5647
5648
5649
5650 func dounlockOSThread() {
5651 if GOARCH == "wasm" {
5652 return
5653 }
5654 gp := getg()
5655 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5656 return
5657 }
5658 gp.m.lockedg = 0
5659 gp.lockedm = 0
5660 }
5661
5662
5663
// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the calling
// goroutine to zero, it unwires the calling goroutine from its fixed
// operating system thread. If there are no active LockOSThread calls,
// this is a no-op.
//
// Before calling UnlockOSThread, the caller must ensure that the OS
// thread is suitable for running other goroutines. If the caller made
// any permanent changes to the state of the thread that would affect
// other goroutines, it should not call this function and thus leave
// the goroutine locked to the OS thread until the goroutine (and hence
// the thread) exits.
5676 func UnlockOSThread() {
5677 gp := getg()
5678 if gp.m.lockedExt == 0 {
5679 return
5680 }
5681 gp.m.lockedExt--
5682 dounlockOSThread()
5683 }
5684
5685
5686 func unlockOSThread() {
5687 gp := getg()
5688 if gp.m.lockedInt == 0 {
5689 systemstack(badunlockosthread)
5690 }
5691 gp.m.lockedInt--
5692 dounlockOSThread()
5693 }
5694
5695 func badunlockosthread() {
5696 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5697 }
5698
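// gcount returns the number of live goroutines, optionally including
// system goroutines. The counters are read without synchronization, so
// the result is approximate; it is clamped to at least 1.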
5699 func gcount(includeSys bool) int32 {
5700 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5701 if !includeSys {
5702 n -= sched.ngsys.Load()
5703 }
5704 for _, pp := range allp {
5705 n -= pp.gFree.size
5706 }
5707
5708
5709
5710 if n < 1 {
5711 n = 1
5712 }
5713 return n
5714 }
5715
5716
5717
5718
5719
5720 func goroutineleakcount() int {
5721 return work.goroutineLeak.count
5722 }
5723
5724 func mcount() int32 {
5725 return int32(sched.mnext - sched.nmfreed)
5726 }
5727
5728 var prof struct {
5729 signalLock atomic.Uint32
5730
5731
5732
5733 hz atomic.Int32
5734 }
5735
5736 func _System() { _System() }
5737 func _ExternalCode() { _ExternalCode() }
5738 func _LostExternalCode() { _LostExternalCode() }
5739 func _GC() { _GC() }
5740 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5741 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5742 func _VDSO() { _VDSO() }
5743
5744
5745
5746
5747
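// sigprof records a CPU profiling sample for the interrupted goroutine.
// It runs from the SIGPROF handler, so it must not allocate or acquire
// runtime locks; it unwinds the interrupted stack (with special cases
// for cgo, libcall, and VDSO frames) and adds it to the profile.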
5748 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5749 if prof.hz.Load() == 0 {
5750 return
5751 }
5752
5753
5754
5755
5756 if mp != nil && mp.profilehz == 0 {
5757 return
5758 }
5759
5760
5761
5762
5763
5764
5765
5766 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5767 if f := findfunc(pc); f.valid() {
5768 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5769 cpuprof.lostAtomic++
5770 return
5771 }
5772 }
5773 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5774
5775
5776
5777 cpuprof.lostAtomic++
5778 return
5779 }
5780 }
5781
5782
5783
5784
5785
5786
5787
5788 getg().m.mallocing++
5789
5790 var u unwinder
5791 var stk [maxCPUProfStack]uintptr
5792 n := 0
5793 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5794 cgoOff := 0
5795
5796
5797
5798
5799
5800 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5801 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5802 cgoOff++
5803 }
5804 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5805 mp.cgoCallers[0] = 0
5806 }
5807
5808
5809 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5810 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5811
5812
5813 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5814 } else if mp != nil && mp.vdsoSP != 0 {
5815
5816
5817 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5818 } else {
5819 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5820 }
5821 n += tracebackPCs(&u, 0, stk[n:])
5822
5823 if n <= 0 {
5824
5825
5826 n = 2
5827 if inVDSOPage(pc) {
5828 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5829 } else if pc > firstmoduledata.etext {
5830
5831 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5832 }
5833 stk[0] = pc
5834 if mp.preemptoff != "" {
5835 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5836 } else {
5837 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5838 }
5839 }
5840
5841 if prof.hz.Load() != 0 {
5842
5843
5844
5845 var tagPtr *unsafe.Pointer
5846 if gp != nil && gp.m != nil && gp.m.curg != nil {
5847 tagPtr = &gp.m.curg.labels
5848 }
5849 cpuprof.add(tagPtr, stk[:n])
5850
5851 gprof := gp
5852 var mp *m
5853 var pp *p
5854 if gp != nil && gp.m != nil {
5855 if gp.m.curg != nil {
5856 gprof = gp.m.curg
5857 }
5858 mp = gp.m
5859 pp = gp.m.p.ptr()
5860 }
5861 traceCPUSample(gprof, mp, pp, stk[:n])
5862 }
5863 getg().m.mallocing--
5864 }
5865
5866
5867
5868 func setcpuprofilerate(hz int32) {
5869
5870 if hz < 0 {
5871 hz = 0
5872 }
5873
5874
5875
5876 gp := getg()
5877 gp.m.locks++
5878
5879
5880
5881
5882 setThreadCPUProfiler(0)
5883
5884 for !prof.signalLock.CompareAndSwap(0, 1) {
5885 osyield()
5886 }
5887 if prof.hz.Load() != hz {
5888 setProcessCPUProfiler(hz)
5889 prof.hz.Store(hz)
5890 }
5891 prof.signalLock.Store(0)
5892
5893 lock(&sched.lock)
5894 sched.profilehz = hz
5895 unlock(&sched.lock)
5896
5897 if hz != 0 {
5898 setThreadCPUProfiler(hz)
5899 }
5900
5901 gp.m.locks--
5902 }
5903
5904
5905
5906 func (pp *p) init(id int32) {
5907 pp.id = id
5908 pp.gcw.id = id
5909 pp.status = _Pgcstop
5910 pp.sudogcache = pp.sudogbuf[:0]
5911 pp.deferpool = pp.deferpoolbuf[:0]
5912 pp.wbBuf.reset()
5913 if pp.mcache == nil {
5914 if id == 0 {
5915 if mcache0 == nil {
5916 throw("missing mcache?")
5917 }
5918
5919
5920 pp.mcache = mcache0
5921 } else {
5922 pp.mcache = allocmcache()
5923 }
5924 }
5925 if raceenabled && pp.raceprocctx == 0 {
5926 if id == 0 {
5927 pp.raceprocctx = raceprocctx0
5928 raceprocctx0 = 0
5929 } else {
5930 pp.raceprocctx = raceproccreate()
5931 }
5932 }
5933 lockInit(&pp.timers.mu, lockRankTimers)
5934
5935
5936
5937 timerpMask.set(id)
5938
5939
5940 idlepMask.clear(id)
5941 }
5942
5943
5944
5945
5946
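// destroy releases all resources held by pp, moving its runnable work
// and timers to other Ps, and transitions it to _Pdead. sched.lock
// must be held and the world must be stopped.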
5947 func (pp *p) destroy() {
5948 assertLockHeld(&sched.lock)
5949 assertWorldStopped()
5950
5951
5952 for pp.runqhead != pp.runqtail {
5953
5954 pp.runqtail--
5955 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5956
5957 globrunqputhead(gp)
5958 }
5959 if pp.runnext != 0 {
5960 globrunqputhead(pp.runnext.ptr())
5961 pp.runnext = 0
5962 }
5963
5964
5965 getg().m.p.ptr().timers.take(&pp.timers)
5966
5967
5968
5969 if phase := gcphase; phase != _GCoff {
5970 println("runtime: p id", pp.id, "destroyed during GC phase", phase)
5971 throw("P destroyed while GC is running")
5972 }
5973
5974 pp.gcw.spanq.destroy()
5975
5976 clear(pp.sudogbuf[:])
5977 pp.sudogcache = pp.sudogbuf[:0]
5978 pp.pinnerCache = nil
5979 clear(pp.deferpoolbuf[:])
5980 pp.deferpool = pp.deferpoolbuf[:0]
5981 systemstack(func() {
5982 for i := 0; i < pp.mspancache.len; i++ {
5983
5984 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5985 }
5986 pp.mspancache.len = 0
5987 lock(&mheap_.lock)
5988 pp.pcache.flush(&mheap_.pages)
5989 unlock(&mheap_.lock)
5990 })
5991 freemcache(pp.mcache)
5992 pp.mcache = nil
5993 gfpurge(pp)
5994 if raceenabled {
5995 if pp.timers.raceCtx != 0 {
5996
5997
5998
5999
6000
6001 mp := getg().m
6002 phold := mp.p.ptr()
6003 mp.p.set(pp)
6004
6005 racectxend(pp.timers.raceCtx)
6006 pp.timers.raceCtx = 0
6007
6008 mp.p.set(phold)
6009 }
6010 raceprocdestroy(pp.raceprocctx)
6011 pp.raceprocctx = 0
6012 }
6013 pp.gcAssistTime = 0
6014 gcCleanups.queued += pp.cleanupsQueued
6015 pp.cleanupsQueued = 0
6016 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
6017 pp.goroutinesCreated = 0
6018 pp.xRegs.free()
6019 pp.status = _Pdead
6020 }
6021
6022
6023
6024
6025
6026
6027
6028
6029
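// procresize changes the number of Ps to nprocs, growing or shrinking
// allp and the associated masks. sched.lock must be held and the world
// must be stopped. It returns the list of Ps with local work that the
// caller is responsible for starting.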
6030 func procresize(nprocs int32) *p {
6031 assertLockHeld(&sched.lock)
6032 assertWorldStopped()
6033
6034 old := gomaxprocs
6035 if old < 0 || nprocs <= 0 {
6036 throw("procresize: invalid arg")
6037 }
6038 trace := traceAcquire()
6039 if trace.ok() {
6040 trace.Gomaxprocs(nprocs)
6041 traceRelease(trace)
6042 }
6043
6044
6045 now := nanotime()
6046 if sched.procresizetime != 0 {
6047 sched.totaltime += int64(old) * (now - sched.procresizetime)
6048 }
6049 sched.procresizetime = now
6050
6051
6052 if nprocs > int32(len(allp)) {
6053
6054
6055 lock(&allpLock)
6056 if nprocs <= int32(cap(allp)) {
6057 allp = allp[:nprocs]
6058 } else {
6059 nallp := make([]*p, nprocs)
6060
6061
6062 copy(nallp, allp[:cap(allp)])
6063 allp = nallp
6064 }
6065
6066 idlepMask = idlepMask.resize(nprocs)
6067 timerpMask = timerpMask.resize(nprocs)
6068 work.spanqMask = work.spanqMask.resize(nprocs)
6069 unlock(&allpLock)
6070 }
6071
6072
6073 for i := old; i < nprocs; i++ {
6074 pp := allp[i]
6075 if pp == nil {
6076 pp = new(p)
6077 }
6078 pp.init(i)
6079 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
6080 }
6081
6082 gp := getg()
6083 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
6084
6085 gp.m.p.ptr().status = _Prunning
6086 gp.m.p.ptr().mcache.prepareForSweep()
6087 } else {
6088
6089
6090
6091
6092
6093 if gp.m.p != 0 {
6094 trace := traceAcquire()
6095 if trace.ok() {
6096
6097
6098
6099 trace.GoSched()
6100 trace.ProcStop(gp.m.p.ptr())
6101 traceRelease(trace)
6102 }
6103 gp.m.p.ptr().m = 0
6104 }
6105 gp.m.p = 0
6106 pp := allp[0]
6107 pp.m = 0
6108 pp.status = _Pidle
6109 acquirep(pp)
6110 trace := traceAcquire()
6111 if trace.ok() {
6112 trace.GoStart()
6113 traceRelease(trace)
6114 }
6115 }
6116
6117
6118 mcache0 = nil
6119
6120
6121 for i := nprocs; i < old; i++ {
6122 pp := allp[i]
6123 pp.destroy()
6124
6125 }
6126
6127
6128 if int32(len(allp)) != nprocs {
6129 lock(&allpLock)
6130 allp = allp[:nprocs]
6131 idlepMask = idlepMask.resize(nprocs)
6132 timerpMask = timerpMask.resize(nprocs)
6133 work.spanqMask = work.spanqMask.resize(nprocs)
6134 unlock(&allpLock)
6135 }
6136
6137
6138 var runnablePs *p
6139 var runnablePsNeedM *p
6140 var idlePs *p
6141 for i := nprocs - 1; i >= 0; i-- {
6142 pp := allp[i]
6143 if gp.m.p.ptr() == pp {
6144 continue
6145 }
6146 pp.status = _Pidle
6147 if runqempty(pp) {
6148 pp.link.set(idlePs)
6149 idlePs = pp
6150 continue
6151 }
6152
6153
6154
6155
6156
6157
6158
6159
6160 var mp *m
6161 if oldm := pp.oldm.get(); oldm != nil {
6162
6163 mp = mgetSpecific(oldm)
6164 }
6165 if mp == nil {
6166
6167 pp.link.set(runnablePsNeedM)
6168 runnablePsNeedM = pp
6169 continue
6170 }
6171 pp.m.set(mp)
6172 pp.link.set(runnablePs)
6173 runnablePs = pp
6174 }
6175
6176
6177 for runnablePsNeedM != nil {
6178 pp := runnablePsNeedM
6179 runnablePsNeedM = pp.link.ptr()
6180
6181 mp := mget()
6182 pp.m.set(mp)
6183 pp.link.set(runnablePs)
6184 runnablePs = pp
6185 }
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
6209
6210
6211 if gcBlackenEnabled != 0 {
6212 for idlePs != nil {
6213 pp := idlePs
6214
6215 ok, _ := gcController.assignWaitingGCWorker(pp, now)
6216 if !ok {
6217
6218 break
6219 }
6220
6221
6222
6223
6224
6225
6226
6227
6228 idlePs = pp.link.ptr()
6229 mp := mget()
6230 pp.m.set(mp)
6231 pp.link.set(runnablePs)
6232 runnablePs = pp
6233 }
6234 }
6235
6236
6237 for idlePs != nil {
6238 pp := idlePs
6239 idlePs = pp.link.ptr()
6240 pidleput(pp, now)
6241 }
6242
6243 stealOrder.reset(uint32(nprocs))
6244 var int32p *int32 = &gomaxprocs
6245 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
6246 if old != nprocs {
6247
6248 gcCPULimiter.resetCapacity(now, nprocs)
6249 }
6250 return runnablePs
6251 }
6252
6253
6254
6255
6256
6257
6258
6259 func acquirep(pp *p) {
6260
6261 acquirepNoTrace(pp)
6262
6263
6264 trace := traceAcquire()
6265 if trace.ok() {
6266 trace.ProcStart()
6267 traceRelease(trace)
6268 }
6269 }
6270
6271
6272
6273
6274 func acquirepNoTrace(pp *p) {
6275
6276 wirep(pp)
6277
6278
6279
6280
6281
6282
6283 pp.oldm = pp.m.ptr().self
6284
6285
6286
6287 pp.mcache.prepareForSweep()
6288 }
6289
6290
6291
6292
6293
6294
6295
6296 func wirep(pp *p) {
6297 gp := getg()
6298
6299 if gp.m.p != 0 {
6300
6301
6302 systemstack(func() {
6303 throw("wirep: already in go")
6304 })
6305 }
6306 if pp.m != 0 || pp.status != _Pidle {
6307
6308
6309 systemstack(func() {
6310 id := int64(0)
6311 if pp.m != 0 {
6312 id = pp.m.ptr().id
6313 }
6314 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6315 throw("wirep: invalid p state")
6316 })
6317 }
6318 gp.m.p.set(pp)
6319 pp.m.set(gp.m)
6320 pp.status = _Prunning
6321 }
6322
6323
6324 func releasep() *p {
6325 trace := traceAcquire()
6326 if trace.ok() {
6327 trace.ProcStop(getg().m.p.ptr())
6328 traceRelease(trace)
6329 }
6330 return releasepNoTrace()
6331 }
6332
6333
6334 func releasepNoTrace() *p {
6335 gp := getg()
6336
6337 if gp.m.p == 0 {
6338 throw("releasep: invalid arg")
6339 }
6340 pp := gp.m.p.ptr()
6341 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6342 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6343 throw("releasep: invalid p state")
6344 }
6345
6346
6347 gcController.releaseNextGCMarkWorker(pp)
6348
6349 gp.m.p = 0
6350 pp.m = 0
6351 pp.status = _Pidle
6352 return pp
6353 }
6354
6355 func incidlelocked(v int32) {
6356 lock(&sched.lock)
6357 sched.nmidlelocked += v
6358 if v > 0 {
6359 checkdead()
6360 }
6361 unlock(&sched.lock)
6362 }
6363
6364
6365
6366
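// checkdead reports a fatal deadlock when no goroutine can make
// further progress: all Ms are idle or locked and every remaining user
// goroutine is blocked. Under faketime it instead advances the fake
// clock to the next timer. sched.lock must be held.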
6367 func checkdead() {
6368 assertLockHeld(&sched.lock)
6369
6370
6371
6372
6373
6374
6375 if (islibrary || isarchive) && GOARCH != "wasm" {
6376 return
6377 }
6378
6379
6380
6381
6382
6383 if panicking.Load() > 0 {
6384 return
6385 }
6386
6387
6388
6389
6390
6391 var run0 int32
6392 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6393 run0 = 1
6394 }
6395
6396 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6397 if run > run0 {
6398 return
6399 }
6400 if run < 0 {
6401 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6402 unlock(&sched.lock)
6403 throw("checkdead: inconsistent counts")
6404 }
6405
6406 grunning := 0
6407 forEachG(func(gp *g) {
6408 if isSystemGoroutine(gp, false) {
6409 return
6410 }
6411 s := readgstatus(gp)
6412 switch s &^ _Gscan {
6413 case _Gwaiting,
6414 _Gpreempted:
6415 grunning++
6416 case _Grunnable,
6417 _Grunning,
6418 _Gsyscall:
6419 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6420 unlock(&sched.lock)
6421 throw("checkdead: runnable g")
6422 }
6423 })
6424 if grunning == 0 {
6425 unlock(&sched.lock)
6426 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6427 }
6428
6429
6430 if faketime != 0 {
6431 if when := timeSleepUntil(); when < maxWhen {
6432 faketime = when
6433
6434
6435 pp, _ := pidleget(faketime)
6436 if pp == nil {
6437
6438
6439 unlock(&sched.lock)
6440 throw("checkdead: no p for timer")
6441 }
6442 mp := mget()
6443 if mp == nil {
6444
6445
6446 unlock(&sched.lock)
6447 throw("checkdead: no m for timer")
6448 }
6449
6450
6451
6452 sched.nmspinning.Add(1)
6453 mp.spinning = true
6454 mp.nextp.set(pp)
6455 notewakeup(&mp.park)
6456 return
6457 }
6458 }
6459
6460
6461 for _, pp := range allp {
6462 if len(pp.timers.heap) > 0 {
6463 return
6464 }
6465 }
6466
6467 unlock(&sched.lock)
6468 fatal("all goroutines are asleep - deadlock!")
6469 }
6470
6471
6472
6473
6474
6475
6476 var forcegcperiod int64 = 2 * 60 * 1e9
6477
6478
6479
6480
6481 const haveSysmon = GOARCH != "wasm"
6482
6483
6484
6485
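// sysmon is the system monitor. It runs on its own M without a P,
// backing off from 20us to 10ms between checks. It polls the network
// when the poller has been neglected, retakes Ps stuck in syscalls or
// running a goroutine too long, wakes the scavenger, triggers forced
// GCs, and emits schedtrace output.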
6486 func sysmon() {
6487 lock(&sched.lock)
6488 sched.nmsys++
6489 checkdead()
6490 unlock(&sched.lock)
6491
6492 lastgomaxprocs := int64(0)
6493 lasttrace := int64(0)
6494 idle := 0
6495 delay := uint32(0)
6496
6497 for {
6498 if idle == 0 {
6499 delay = 20
6500 } else if idle > 50 {
6501 delay *= 2
6502 }
6503 if delay > 10*1000 {
6504 delay = 10 * 1000
6505 }
6506 usleep(delay)
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523 now := nanotime()
6524 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6525 lock(&sched.lock)
6526 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6527 syscallWake := false
6528 next := timeSleepUntil()
6529 if next > now {
6530 sched.sysmonwait.Store(true)
6531 unlock(&sched.lock)
6532
6533
6534 sleep := forcegcperiod / 2
6535 if next-now < sleep {
6536 sleep = next - now
6537 }
6538 shouldRelax := sleep >= osRelaxMinNS
6539 if shouldRelax {
6540 osRelax(true)
6541 }
6542 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6543 if shouldRelax {
6544 osRelax(false)
6545 }
6546 lock(&sched.lock)
6547 sched.sysmonwait.Store(false)
6548 noteclear(&sched.sysmonnote)
6549 }
6550 if syscallWake {
6551 idle = 0
6552 delay = 20
6553 }
6554 }
6555 unlock(&sched.lock)
6556 }
6557
6558 lock(&sched.sysmonlock)
6559
6560
6561 now = nanotime()
6562
6563
6564 if *cgo_yield != nil {
6565 asmcgocall(*cgo_yield, nil)
6566 }
6567
6568 lastpoll := sched.lastpoll.Load()
6569 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6570 sched.lastpoll.CompareAndSwap(lastpoll, now)
6571 list, delta := netpoll(0)
6572 if !list.empty() {
6573
6574
6575
6576
6577
6578
6579
6580 incidlelocked(-1)
6581 injectglist(&list)
6582 incidlelocked(1)
6583 netpollAdjustWaiters(delta)
6584 }
6585 }
6586
6587 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6588 sysmonUpdateGOMAXPROCS()
6589 lastgomaxprocs = now
6590 }
6591 if scavenger.sysmonWake.Load() != 0 {
6592
6593 scavenger.wake()
6594 }
6595
6596
6597 if retake(now) != 0 {
6598 idle = 0
6599 } else {
6600 idle++
6601 }
6602
6603 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6604 lock(&forcegc.lock)
6605 forcegc.idle.Store(false)
6606 var list gList
6607 list.push(forcegc.g)
6608 injectglist(&list)
6609 unlock(&forcegc.lock)
6610 }
6611 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6612 lasttrace = now
6613 schedtrace(debug.scheddetail > 0)
6614 }
6615 unlock(&sched.sysmonlock)
6616 }
6617 }
6618
6619 type sysmontick struct {
6620 schedtick uint32
6621 syscalltick uint32
6622 schedwhen int64
6623 syscallwhen int64
6624 }
6625
6626
6627
6628 const forcePreemptNS = 10 * 1000 * 1000
6629
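// retake preempts Ps whose current goroutine has been running for more
// than forcePreemptNS, and hands off Ps owned by threads sitting in a
// syscall when the P has local work, there are no spinning or idle Ps,
// or the syscall has lasted at least 10ms. It returns the number of Ps
// whose ownership was taken.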
6630 func retake(now int64) uint32 {
6631 n := 0
6632
6633
6634 lock(&allpLock)
6635
6636
6637
6638 for i := 0; i < len(allp); i++ {
6639
6640
6641
6642
6643
6644
6645
6646
6647 pp := allp[i]
6648 if pp == nil || atomic.Load(&pp.status) != _Prunning {
6649
6650
6651 continue
6652 }
6653 pd := &pp.sysmontick
6654 sysretake := false
6655
6656
6657
6658
6659
6660 schedt := int64(pp.schedtick)
6661 if int64(pd.schedtick) != schedt {
6662 pd.schedtick = uint32(schedt)
6663 pd.schedwhen = now
6664 } else if pd.schedwhen+forcePreemptNS <= now {
6665 preemptone(pp)
6666
6667
6668
6669
6670 sysretake = true
6671 }
6672
6673
6674 unlock(&allpLock)
6675
6676
6677
6678
6679
6680
6681
6682
6683 incidlelocked(-1)
6684
6685
6686 thread, ok := setBlockOnExitSyscall(pp)
6687 if !ok {
6688
6689 goto done
6690 }
6691
6692
6693 if syst := int64(pp.syscalltick); !sysretake && int64(pd.syscalltick) != syst {
6694 pd.syscalltick = uint32(syst)
6695 pd.syscallwhen = now
6696 thread.resume()
6697 goto done
6698 }
6699
6700
6701
6702
6703 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6704 thread.resume()
6705 goto done
6706 }
6707
6708
6709
6710 thread.takeP()
6711 thread.resume()
6712 n++
6713
6714
6715 handoffp(pp)
6716
6717
6718
6719 done:
6720 incidlelocked(1)
6721 lock(&allpLock)
6722 }
6723 unlock(&allpLock)
6724 return uint32(n)
6725 }
6726
6727
6728
6729 type syscallingThread struct {
6730 gp *g
6731 mp *m
6732 pp *p
6733 status uint32
6734 }
6735
6736
6737
6738
6739
6740
6741
6742
6743
6744
6745
6746
6747
6748
6749
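// setBlockOnExitSyscall attempts to pin the goroutine that is in a
// syscall while owning pp by setting its _Gscan bit, which blocks it
// from completing exitsyscall until resume is called. On success it
// returns a handle to the goroutine, its M, and pp; callers may then
// use takeP or gcstopP to take the P before resuming the goroutine.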
6750 func setBlockOnExitSyscall(pp *p) (syscallingThread, bool) {
6751 if pp.status != _Prunning {
6752 return syscallingThread{}, false
6753 }
6754
6755
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765 mp := pp.m.ptr()
6766 if mp == nil {
6767
6768 return syscallingThread{}, false
6769 }
6770 gp := mp.curg
6771 if gp == nil {
6772
6773 return syscallingThread{}, false
6774 }
6775 status := readgstatus(gp) &^ _Gscan
6776
6777
6778
6779
6780 if status != _Gsyscall && status != _Gdeadextra {
6781
6782 return syscallingThread{}, false
6783 }
6784 if !castogscanstatus(gp, status, status|_Gscan) {
6785
6786 return syscallingThread{}, false
6787 }
6788 if gp.m != mp || gp.m.p.ptr() != pp {
6789
6790 casfrom_Gscanstatus(gp, status|_Gscan, status)
6791 return syscallingThread{}, false
6792 }
6793 return syscallingThread{gp, mp, pp, status}, true
6794 }
6795
6796
6797
6798
6799
6800 func (s syscallingThread) gcstopP() {
6801 assertLockHeld(&sched.lock)
6802
6803 s.releaseP(_Pgcstop)
6804 s.pp.gcStopTime = nanotime()
6805 sched.stopwait--
6806 }
6807
6808
6809
6810 func (s syscallingThread) takeP() {
6811 s.releaseP(_Pidle)
6812 }
6813
6814
6815
6816
6817 func (s syscallingThread) releaseP(state uint32) {
6818 if state != _Pidle && state != _Pgcstop {
6819 throw("attempted to release P into a bad state")
6820 }
6821 trace := traceAcquire()
6822 s.pp.m = 0
6823 s.mp.p = 0
6824 atomic.Store(&s.pp.status, state)
6825 if trace.ok() {
6826 trace.ProcSteal(s.pp)
6827 traceRelease(trace)
6828 }
6829 addGSyscallNoP(s.mp)
6830 s.pp.syscalltick++
6831 }
6832
6833
6834 func (s syscallingThread) resume() {
6835 casfrom_Gscanstatus(s.gp, s.status|_Gscan, s.status)
6836 }
6837
6838
6839
6840
6841
6842
6843 func preemptall() bool {
6844 res := false
6845 for _, pp := range allp {
6846 if pp.status != _Prunning {
6847 continue
6848 }
6849 if preemptone(pp) {
6850 res = true
6851 }
6852 }
6853 return res
6854 }
6855
6856
6857
6858
6859
6860
6861
6862
6863
6864
6865
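// preemptone requests preemption of the goroutine running on pp by
// setting its stackguard0 to stackPreempt (forcing a failure at the
// next stack check) and, when supported, sending the M an asynchronous
// preemption signal. This is best effort: the M may be running a
// different goroutine by the time the request is noticed, and a
// goroutine in a syscall is not preempted.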
6866 func preemptone(pp *p) bool {
6867 mp := pp.m.ptr()
6868 if mp == nil || mp == getg().m {
6869 return false
6870 }
6871 gp := mp.curg
6872 if gp == nil || gp == mp.g0 {
6873 return false
6874 }
6875 if readgstatus(gp)&^_Gscan == _Gsyscall {
6876
6877 return false
6878 }
6879
6880 gp.preempt = true
6881
6882
6883
6884
6885
6886 gp.stackguard0 = stackPreempt
6887
6888
6889 if preemptMSupported && debug.asyncpreemptoff == 0 {
6890 pp.preempt = true
6891 preemptM(mp)
6892 }
6893
6894 return true
6895 }
6896
6897 var starttime int64
6898
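// schedtrace prints a snapshot of scheduler state: global and per-P run
// queue lengths, idle/spinning thread counts and, in detailed mode, one line
// per P, M and G. It is normally driven by the schedtrace and scheddetail
// GODEBUG settings, for example:
//
//	GODEBUG=schedtrace=1000 ./prog               // summary line every 1000ms
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // adds per-P/M/G detail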
6899 func schedtrace(detailed bool) {
6900 now := nanotime()
6901 if starttime == 0 {
6902 starttime = now
6903 }
6904
6905 lock(&sched.lock)
6906 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6907 if detailed {
6908 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6909 }
6910
6911
6912
6913 for i, pp := range allp {
6914 h := atomic.Load(&pp.runqhead)
6915 t := atomic.Load(&pp.runqtail)
6916 if detailed {
6917 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6918 mp := pp.m.ptr()
6919 if mp != nil {
6920 print(mp.id)
6921 } else {
6922 print("nil")
6923 }
6924 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6925 } else {
6926
6927
6928 print(" ")
6929 if i == 0 {
6930 print("[ ")
6931 }
6932 print(t - h)
6933 if i == len(allp)-1 {
6934 print(" ]")
6935 }
6936 }
6937 }
6938
6939 if !detailed {
6940
6941 print(" schedticks=[ ")
6942 for _, pp := range allp {
6943 print(pp.schedtick)
6944 print(" ")
6945 }
6946 print("]\n")
6947 }
6948
6949 if !detailed {
6950 unlock(&sched.lock)
6951 return
6952 }
6953
6954 for mp := allm; mp != nil; mp = mp.alllink {
6955 pp := mp.p.ptr()
6956 print(" M", mp.id, ": p=")
6957 if pp != nil {
6958 print(pp.id)
6959 } else {
6960 print("nil")
6961 }
6962 print(" curg=")
6963 if mp.curg != nil {
6964 print(mp.curg.goid)
6965 } else {
6966 print("nil")
6967 }
6968 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6969 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6970 print(lockedg.goid)
6971 } else {
6972 print("nil")
6973 }
6974 print("\n")
6975 }
6976
6977 forEachG(func(gp *g) {
6978 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6979 if gp.m != nil {
6980 print(gp.m.id)
6981 } else {
6982 print("nil")
6983 }
6984 print(" lockedm=")
6985 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6986 print(lockedm.id)
6987 } else {
6988 print("nil")
6989 }
6990 print("\n")
6991 })
6992 unlock(&sched.lock)
6993 }
6994
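// updateMaxProcsGState coordinates sysmon, which notices that the default
// GOMAXPROCS value has changed, with the background goroutine that applies
// the change (updateMaxProcsGoroutine). idle reports whether that goroutine
// is parked and may be woken; procs is the new value to apply, and is only
// written while holding lock with the goroutine parked.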
6995 type updateMaxProcsGState struct {
6996 lock mutex
6997 g *g
6998 idle atomic.Bool
6999
7000
7001 procs int32
7002 }
7003
7004 var (
7005
7006
7007 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
7008
7009
7010
7011 updateMaxProcsG updateMaxProcsGState
7012
7013
7014
7015
7016
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
7051
7052
7053
7054
7055
7056
7057
7058
7059
7060 computeMaxProcsLock mutex
7061 )
7062
7063
7064
7065
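// defaultGOMAXPROCSUpdateEnable turns on automatic GOMAXPROCS updates: a
// background goroutine re-applies the default value (derived from the CPU
// count and, where applicable, container CPU limits) when it changes. If the
// updatemaxprocs GODEBUG is set to 0 it only records the non-default setting
// and returns. Two ways a program can opt out of automatic updates (the
// runtime.GOMAXPROCS call below is ordinary user code, shown only as an
// example):
//
//	GODEBUG=updatemaxprocs=0 ./prog  // never update automatically
//
//	runtime.GOMAXPROCS(8)            // an explicit setting also pins the value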
7066 func defaultGOMAXPROCSUpdateEnable() {
7067 if debug.updatemaxprocs == 0 {
7068
7069
7070
7071
7072
7073
7074
7075
7076
7077
7078
7079 updatemaxprocs.IncNonDefault()
7080 return
7081 }
7082
7083 go updateMaxProcsGoroutine()
7084 }
7085
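// updateMaxProcsGoroutine is the background goroutine woken by
// sysmonUpdateGOMAXPROCS. Each round it parks until there is a new value,
// then stops the world, re-checks that no custom GOMAXPROCS has been set in
// the meantime (exiting for good if one has), stores the new value in
// newprocs for the world restart to apply, and starts the world again.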
7086 func updateMaxProcsGoroutine() {
7087 updateMaxProcsG.g = getg()
7088 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
7089 for {
7090 lock(&updateMaxProcsG.lock)
7091 if updateMaxProcsG.idle.Load() {
7092 throw("updateMaxProcsGoroutine: phase error")
7093 }
7094 updateMaxProcsG.idle.Store(true)
7095 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
7096
7097
7098 stw := stopTheWorldGC(stwGOMAXPROCS)
7099
7100
7101 lock(&sched.lock)
7102 custom := sched.customGOMAXPROCS
7103 unlock(&sched.lock)
7104 if custom {
7105 startTheWorldGC(stw)
7106 return
7107 }
7108
7109
7110
7111
7112
7113 newprocs = updateMaxProcsG.procs
7114 lock(&sched.lock)
7115 sched.customGOMAXPROCS = false
7116 unlock(&sched.lock)
7117
7118 startTheWorldGC(stw)
7119 }
7120 }
7121
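// sysmonUpdateGOMAXPROCS recomputes the default GOMAXPROCS value and, if it
// differs from the current setting and no custom value is in force, hands it
// to updateMaxProcsGoroutine and wakes it. Called from sysmon; the
// stop-the-world update itself happens on the woken goroutine.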
7122 func sysmonUpdateGOMAXPROCS() {
7123
7124 lock(&computeMaxProcsLock)
7125
7126
7127 lock(&sched.lock)
7128 custom := sched.customGOMAXPROCS
7129 curr := gomaxprocs
7130 unlock(&sched.lock)
7131 if custom {
7132 unlock(&computeMaxProcsLock)
7133 return
7134 }
7135
7136
7137 procs := defaultGOMAXPROCS(0)
7138 unlock(&computeMaxProcsLock)
7139 if procs == curr {
7140
7141 return
7142 }
7143
7144
7145
7146
7147 if updateMaxProcsG.idle.Load() {
7148 lock(&updateMaxProcsG.lock)
7149 updateMaxProcsG.procs = procs
7150 updateMaxProcsG.idle.Store(false)
7151 var list gList
7152 list.push(updateMaxProcsG.g)
7153 injectglist(&list)
7154 unlock(&updateMaxProcsG.lock)
7155 }
7156 }
7157
7158
7159
7160
7161
7162
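// schedEnableUser enables or disables scheduling of user goroutines. It does
// not stop user goroutines that are already running. While disabled, newly
// runnable user goroutines accumulate on sched.disable.runnable; on
// re-enable they are flushed to the global run queue and idle Ps are kicked
// to pick them up.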
7163 func schedEnableUser(enable bool) {
7164 lock(&sched.lock)
7165 if sched.disable.user == !enable {
7166 unlock(&sched.lock)
7167 return
7168 }
7169 sched.disable.user = !enable
7170 if enable {
7171 n := sched.disable.runnable.size
7172 globrunqputbatch(&sched.disable.runnable)
7173 unlock(&sched.lock)
7174 for ; n != 0 && sched.npidle.Load() != 0; n-- {
7175 startm(nil, false, false)
7176 }
7177 } else {
7178 unlock(&sched.lock)
7179 }
7180 }
7181
7182
7183
7184
7185
7186 func schedEnabled(gp *g) bool {
7187 assertLockHeld(&sched.lock)
7188
7189 if sched.disable.user {
7190 return isSystemGoroutine(gp, true)
7191 }
7192 return true
7193 }
7194
7195
7196
7197
7198
7199
7200 func mput(mp *m) {
7201 assertLockHeld(&sched.lock)
7202
7203 sched.midle.push(unsafe.Pointer(mp))
7204 sched.nmidle++
7205 checkdead()
7206 }
7207
7208
7209
7210
7211
7212
7213 func mget() *m {
7214 assertLockHeld(&sched.lock)
7215
7216 mp := (*m)(sched.midle.pop())
7217 if mp != nil {
7218 sched.nmidle--
7219 }
7220 return mp
7221 }
7222
7223
7224
7225
7226
7227
7228
7229
7230 func mgetSpecific(mp *m) *m {
7231 assertLockHeld(&sched.lock)
7232
7233 if mp.idleNode.prev == 0 && mp.idleNode.next == 0 {
7234
7235 return nil
7236 }
7237
7238 sched.midle.remove(unsafe.Pointer(mp))
7239 sched.nmidle--
7240
7241 return mp
7242 }
7243
7244
7245
7246
7247
7248
7249 func globrunqput(gp *g) {
7250 assertLockHeld(&sched.lock)
7251
7252 sched.runq.pushBack(gp)
7253 }
7254
7255
7256
7257
7258
7259
7260 func globrunqputhead(gp *g) {
7261 assertLockHeld(&sched.lock)
7262
7263 sched.runq.push(gp)
7264 }
7265
7266
7267
7268
7269
7270
7271
7272 func globrunqputbatch(batch *gQueue) {
7273 assertLockHeld(&sched.lock)
7274
7275 sched.runq.pushBackAll(*batch)
7276 *batch = gQueue{}
7277 }
7278
7279
7280
7281 func globrunqget() *g {
7282 assertLockHeld(&sched.lock)
7283
7284 if sched.runq.size == 0 {
7285 return nil
7286 }
7287
7288 return sched.runq.pop()
7289 }
7290
7291
7292
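// globrunqgetbatch removes up to n goroutines from the global run queue,
// returning one in gp and the remainder in q. The batch is additionally
// capped at size/gomaxprocs+1 so no single P drains the whole global queue.
// sched.lock must be held.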
7293 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
7294 assertLockHeld(&sched.lock)
7295
7296 if sched.runq.size == 0 {
7297 return
7298 }
7299
7300 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
7301
7302 gp = sched.runq.pop()
7303 n--
7304
7305 for ; n > 0; n-- {
7306 gp1 := sched.runq.pop()
7307 q.pushBack(gp1)
7308 }
7309 return
7310 }
7311
7312
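// A pMask is an atomic bitmap with one bit per P (used for the idle-P and
// timer-P masks). All accesses go through atomic Load/Or/And, so the mask
// can be read without holding sched.lock.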
7313 type pMask []uint32
7314
7315
7316 func (p pMask) read(id uint32) bool {
7317 word := id / 32
7318 mask := uint32(1) << (id % 32)
7319 return (atomic.Load(&p[word]) & mask) != 0
7320 }
7321
7322
7323 func (p pMask) set(id int32) {
7324 word := id / 32
7325 mask := uint32(1) << (id % 32)
7326 atomic.Or(&p[word], mask)
7327 }
7328
7329
7330 func (p pMask) clear(id int32) {
7331 word := id / 32
7332 mask := uint32(1) << (id % 32)
7333 atomic.And(&p[word], ^mask)
7334 }
7335
7336
7337 func (p pMask) any() bool {
7338 for i := range p {
7339 if atomic.Load(&p[i]) != 0 {
7340 return true
7341 }
7342 }
7343 return false
7344 }
7345
7346
7347
7348
7349
7350 func (p pMask) resize(nprocs int32) pMask {
7351 maskWords := (nprocs + 31) / 32
7352
7353 if maskWords <= int32(cap(p)) {
7354 return p[:maskWords]
7355 }
7356 newMask := make([]uint32, maskWords)
7357
7358 copy(newMask, p)
7359 return newMask
7360 }
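// The word/bit arithmetic above is the usual dense-bitmap encoding: id/32
// picks the word, 1<<(id%32) picks the bit. A stand-alone sketch of the same
// idea using only the public sync/atomic package (illustrative types, not
// runtime APIs; assumes Go 1.23+ for OrUint32/AndUint32):
//
//	import "sync/atomic"
//
//	type bitmap []uint32
//
//	func (b bitmap) set(id uint32)   { atomic.OrUint32(&b[id/32], 1<<(id%32)) }
//	func (b bitmap) clear(id uint32) { atomic.AndUint32(&b[id/32], ^(uint32(1) << (id % 32))) }
//	func (b bitmap) read(id uint32) bool {
//		return atomic.LoadUint32(&b[id/32])&(1<<(id%32)) != 0
//	}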
7361
7362
7363
7364
7365
7366
7367
7368
7369
7370
7371
7372
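// pidleput puts pp on the idle-P list. pp's run queue must be empty and
// sched.lock must be held. It sets pp's bit in idlepMask, clears its
// timerpMask bit if it has no pending timers, and starts the CPU limiter's
// idle event. now is the current time if already known (0 means "call
// nanotime here"); the time actually used is returned.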
7373 func pidleput(pp *p, now int64) int64 {
7374 assertLockHeld(&sched.lock)
7375
7376 if !runqempty(pp) {
7377 throw("pidleput: P has non-empty run queue")
7378 }
7379 if now == 0 {
7380 now = nanotime()
7381 }
7382 if pp.timers.len.Load() == 0 {
7383 timerpMask.clear(pp.id)
7384 }
7385 idlepMask.set(pp.id)
7386 pp.link = sched.pidle
7387 sched.pidle.set(pp)
7388 sched.npidle.Add(1)
7389 if !pp.limiterEvent.start(limiterEventIdle, now) {
7390 throw("must be able to track idle limiter event")
7391 }
7392 return now
7393 }
7394
7395
7396
7397
7398
7399
7400
7401
7402 func pidleget(now int64) (*p, int64) {
7403 assertLockHeld(&sched.lock)
7404
7405 pp := sched.pidle.ptr()
7406 if pp != nil {
7407
7408 if now == 0 {
7409 now = nanotime()
7410 }
7411 timerpMask.set(pp.id)
7412 idlepMask.clear(pp.id)
7413 sched.pidle = pp.link
7414 sched.npidle.Add(-1)
7415 pp.limiterEvent.stop(limiterEventIdle, now)
7416 }
7417 return pp, now
7418 }
7419
7420
7421
7422
7423
7424
7425
7426
7427
7428
7429
7430 func pidlegetSpinning(now int64) (*p, int64) {
7431 assertLockHeld(&sched.lock)
7432
7433 pp, now := pidleget(now)
7434 if pp == nil {
7435
7436
7437
7438 sched.needspinning.Store(1)
7439 return nil, now
7440 }
7441
7442 return pp, now
7443 }
7444
7445
7446
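// runqempty reports whether pp has no goroutines on its local run queue,
// including runnext. head, tail and runnext cannot be read in one atomic
// snapshot, so the loop retries until it sees a tail that did not move,
// which is enough to avoid false positives while Gs shuffle between the
// queue and runnext.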
7447 func runqempty(pp *p) bool {
7448
7449
7450
7451
7452 for {
7453 head := atomic.Load(&pp.runqhead)
7454 tail := atomic.Load(&pp.runqtail)
7455 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7456 if tail == atomic.Load(&pp.runqtail) {
7457 return head == tail && runnext == 0
7458 }
7459 }
7460 }
7461
7462
7463
7464
7465
7466
7467
7468
7469
7470
7471 const randomizeScheduler = raceenabled
7472
7473
7474
7475
7476
7477
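// runqput tries to put gp on the local runnable queue. With next set it goes
// into pp.runnext, displacing any previous runnext G onto the queue;
// otherwise it is appended at the tail. If the local queue is full,
// runqputslow moves half of it, plus gp, to the global queue. Only the owner
// P may call this.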
7478 func runqput(pp *p, gp *g, next bool) {
7479 if !haveSysmon && next {
7480
7481
7482
7483
7484
7485
7486
7487
7488 next = false
7489 }
7490 if randomizeScheduler && next && randn(2) == 0 {
7491 next = false
7492 }
7493
7494 if next {
7495 retryNext:
7496 oldnext := pp.runnext
7497 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7498 goto retryNext
7499 }
7500 if oldnext == 0 {
7501 return
7502 }
7503
7504 gp = oldnext.ptr()
7505 }
7506
7507 retry:
7508 h := atomic.LoadAcq(&pp.runqhead)
7509 t := pp.runqtail
7510 if t-h < uint32(len(pp.runq)) {
7511 pp.runq[t%uint32(len(pp.runq))].set(gp)
7512 atomic.StoreRel(&pp.runqtail, t+1)
7513 return
7514 }
7515 if runqputslow(pp, gp, h, t) {
7516 return
7517 }
7518
7519 goto retry
7520 }
7521
7522
7523
7524 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7525 var batch [len(pp.runq)/2 + 1]*g
7526
7527
7528 n := t - h
7529 n = n / 2
7530 if n != uint32(len(pp.runq)/2) {
7531 throw("runqputslow: queue is not full")
7532 }
7533 for i := uint32(0); i < n; i++ {
7534 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7535 }
7536 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7537 return false
7538 }
7539 batch[n] = gp
7540
7541 if randomizeScheduler {
7542 for i := uint32(1); i <= n; i++ {
7543 j := cheaprandn(i + 1)
7544 batch[i], batch[j] = batch[j], batch[i]
7545 }
7546 }
7547
7548
7549 for i := uint32(0); i < n; i++ {
7550 batch[i].schedlink.set(batch[i+1])
7551 }
7552
7553 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7554
7555
7556 lock(&sched.lock)
7557 globrunqputbatch(&q)
7558 unlock(&sched.lock)
7559 return true
7560 }
7561
7562
7563
7564
7565 func runqputbatch(pp *p, q *gQueue) {
7566 if q.empty() {
7567 return
7568 }
7569 h := atomic.LoadAcq(&pp.runqhead)
7570 t := pp.runqtail
7571 n := uint32(0)
7572 for !q.empty() && t-h < uint32(len(pp.runq)) {
7573 gp := q.pop()
7574 pp.runq[t%uint32(len(pp.runq))].set(gp)
7575 t++
7576 n++
7577 }
7578
7579 if randomizeScheduler {
7580 off := func(o uint32) uint32 {
7581 return (pp.runqtail + o) % uint32(len(pp.runq))
7582 }
7583 for i := uint32(1); i < n; i++ {
7584 j := cheaprandn(i + 1)
7585 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7586 }
7587 }
7588
7589 atomic.StoreRel(&pp.runqtail, t)
7590
7591 return
7592 }
7593
7594
7595
7596
7597
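// runqget takes one goroutine from pp's local run queue, preferring runnext.
// inheritTime is true when the G came from runnext, meaning it should finish
// the current time slice rather than start a fresh one. Only the owner P may
// call this.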
7598 func runqget(pp *p) (gp *g, inheritTime bool) {
7599
7600 next := pp.runnext
7601
7602
7603
7604 if next != 0 && pp.runnext.cas(next, 0) {
7605 return next.ptr(), true
7606 }
7607
7608 for {
7609 h := atomic.LoadAcq(&pp.runqhead)
7610 t := pp.runqtail
7611 if t == h {
7612 return nil, false
7613 }
7614 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7615 if atomic.CasRel(&pp.runqhead, h, h+1) {
7616 return gp, false
7617 }
7618 }
7619 }
7620
7621
7622
7623 func runqdrain(pp *p) (drainQ gQueue) {
7624 oldNext := pp.runnext
7625 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7626 drainQ.pushBack(oldNext.ptr())
7627 }
7628
7629 retry:
7630 h := atomic.LoadAcq(&pp.runqhead)
7631 t := pp.runqtail
7632 qn := t - h
7633 if qn == 0 {
7634 return
7635 }
7636 if qn > uint32(len(pp.runq)) {
7637 goto retry
7638 }
7639
7640 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7641 goto retry
7642 }
7643
7644
7645
7646
7647
7648
7649
7650
7651 for i := uint32(0); i < qn; i++ {
7652 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7653 drainQ.pushBack(gp)
7654 }
7655 return
7656 }
7657
7658
7659
7660
7661
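// runqgrab copies about half of pp's local run queue into batch (a ring
// buffer filled starting at batchHead) and advances pp's head. It may be
// called by any P and is the core of work stealing. If the queue is empty
// and stealRunNextG is set it may take pp.runnext instead, sleeping or
// yielding briefly first so a P that is just about to run that G gets the
// chance to do so.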
7662 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7663 for {
7664 h := atomic.LoadAcq(&pp.runqhead)
7665 t := atomic.LoadAcq(&pp.runqtail)
7666 n := t - h
7667 n = n - n/2
7668 if n == 0 {
7669 if stealRunNextG {
7670
7671 if next := pp.runnext; next != 0 {
7672 if pp.status == _Prunning {
7673 if mp := pp.m.ptr(); mp != nil {
7674 if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall {
7675
7676
7677
7678
7679
7680
7681
7682
7683
7684
7685
7686
7687
7688
7689
7690
7691
7692
7693
7694 if !osHasLowResTimer {
7695 usleep(3)
7696 } else {
7697
7698
7699
7700 osyield()
7701 }
7702 }
7703 }
7704 }
7705 if !pp.runnext.cas(next, 0) {
7706 continue
7707 }
7708 batch[batchHead%uint32(len(batch))] = next
7709 return 1
7710 }
7711 }
7712 return 0
7713 }
7714 if n > uint32(len(pp.runq)/2) {
7715 continue
7716 }
7717 for i := uint32(0); i < n; i++ {
7718 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7719 batch[(batchHead+i)%uint32(len(batch))] = g
7720 }
7721 if atomic.CasRel(&pp.runqhead, h, h+n) {
7722 return n
7723 }
7724 }
7725 }
7726
7727
7728
7729
7730 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7731 t := pp.runqtail
7732 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7733 if n == 0 {
7734 return nil
7735 }
7736 n--
7737 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7738 if n == 0 {
7739 return gp
7740 }
7741 h := atomic.LoadAcq(&pp.runqhead)
7742 if t-h+n >= uint32(len(pp.runq)) {
7743 throw("runqsteal: runq overflow")
7744 }
7745 atomic.StoreRel(&pp.runqtail, t+n)
7746 return gp
7747 }
7748
7749
7750
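// A gQueue is a queue of Gs linked through g.schedlink, supporting push at
// the head or tail and pop from the head. A G can be on only one gQueue or
// gList at a time.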
7751 type gQueue struct {
7752 head guintptr
7753 tail guintptr
7754 size int32
7755 }
7756
7757
7758 func (q *gQueue) empty() bool {
7759 return q.head == 0
7760 }
7761
7762
7763 func (q *gQueue) push(gp *g) {
7764 gp.schedlink = q.head
7765 q.head.set(gp)
7766 if q.tail == 0 {
7767 q.tail.set(gp)
7768 }
7769 q.size++
7770 }
7771
7772
7773 func (q *gQueue) pushBack(gp *g) {
7774 gp.schedlink = 0
7775 if q.tail != 0 {
7776 q.tail.ptr().schedlink.set(gp)
7777 } else {
7778 q.head.set(gp)
7779 }
7780 q.tail.set(gp)
7781 q.size++
7782 }
7783
7784
7785
7786 func (q *gQueue) pushBackAll(q2 gQueue) {
7787 if q2.tail == 0 {
7788 return
7789 }
7790 q2.tail.ptr().schedlink = 0
7791 if q.tail != 0 {
7792 q.tail.ptr().schedlink = q2.head
7793 } else {
7794 q.head = q2.head
7795 }
7796 q.tail = q2.tail
7797 q.size += q2.size
7798 }
7799
7800
7801
7802 func (q *gQueue) pop() *g {
7803 gp := q.head.ptr()
7804 if gp != nil {
7805 q.head = gp.schedlink
7806 if q.head == 0 {
7807 q.tail = 0
7808 }
7809 q.size--
7810 }
7811 return gp
7812 }
7813
7814
7815 func (q *gQueue) popList() gList {
7816 stack := gList{q.head, q.size}
7817 *q = gQueue{}
7818 return stack
7819 }
7820
7821
7822
7823 type gList struct {
7824 head guintptr
7825 size int32
7826 }
7827
7828
7829 func (l *gList) empty() bool {
7830 return l.head == 0
7831 }
7832
7833
7834 func (l *gList) push(gp *g) {
7835 gp.schedlink = l.head
7836 l.head.set(gp)
7837 l.size++
7838 }
7839
7840
7841 func (l *gList) pushAll(q gQueue) {
7842 if !q.empty() {
7843 q.tail.ptr().schedlink = l.head
7844 l.head = q.head
7845 l.size += q.size
7846 }
7847 }
7848
7849
7850 func (l *gList) pop() *g {
7851 gp := l.head.ptr()
7852 if gp != nil {
7853 l.head = gp.schedlink
7854 l.size--
7855 }
7856 return gp
7857 }
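// gQueue and gList are intrusive containers: the link field lives in the g
// itself, so pushing and popping never allocate. A minimal stand-alone
// sketch of the same pattern (illustrative types, not runtime APIs):
//
//	type node struct {
//		next *node
//		val  int
//	}
//
//	type fifo struct{ head, tail *node }
//
//	func (q *fifo) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	func (q *fifo) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//		}
//		return n
//	}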
7858
7859
7860 func setMaxThreads(in int) (out int) {
7861 lock(&sched.lock)
7862 out = int(sched.maxmcount)
7863 if in > 0x7fffffff {
7864 sched.maxmcount = 0x7fffffff
7865 } else {
7866 sched.maxmcount = int32(in)
7867 }
7868 checkmcount()
7869 unlock(&sched.lock)
7870 return
7871 }
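// setMaxThreads backs runtime/debug.SetMaxThreads, which adjusts the limit
// on OS threads the program may use (10000 by default). For example:
//
//	import "runtime/debug"
//
//	prev := debug.SetMaxThreads(20000) // raise the limit; prev is the old limit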
7872
7873
7874
7875
7876
7877
7878
7879
7880
7881
7882
7883
7884
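// procPin pins the calling goroutine to its current P by incrementing
// m.locks (which also disables preemption) and returns the P's id. It must
// be paired with procUnpin. The sync and sync/atomic packages reach these
// via the linknamed wrappers below, e.g. sync.Pool uses the returned id to
// index its per-P pool slots.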
7885 func procPin() int {
7886 gp := getg()
7887 mp := gp.m
7888
7889 mp.locks++
7890 return int(mp.p.ptr().id)
7891 }
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902
7903
7904
7905 func procUnpin() {
7906 gp := getg()
7907 gp.m.locks--
7908 }
7909
7910
7911
7912 func sync_runtime_procPin() int {
7913 return procPin()
7914 }
7915
7916
7917
7918 func sync_runtime_procUnpin() {
7919 procUnpin()
7920 }
7921
7922
7923
7924 func sync_atomic_runtime_procPin() int {
7925 return procPin()
7926 }
7927
7928
7929
7930 func sync_atomic_runtime_procUnpin() {
7931 procUnpin()
7932 }
7933
7934
7935
7936
7937
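// internal_sync_runtime_canSpin reports whether active spinning (as used by
// sync.Mutex) is worthwhile right now: only for the first few iterations,
// only on multicore machines, only when some other P is actually running
// (not all idle or spinning), and only when this P's local run queue is
// empty so spinning does not delay runnable work.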
7938 func internal_sync_runtime_canSpin(i int) bool {
7939
7940
7941
7942
7943
7944 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7945 return false
7946 }
7947 if p := getg().m.p.ptr(); !runqempty(p) {
7948 return false
7949 }
7950 return true
7951 }
7952
7953
7954
7955 func internal_sync_runtime_doSpin() {
7956 procyield(active_spin_cnt)
7957 }
7958
7959
7960
7961
7962
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
7973 func sync_runtime_canSpin(i int) bool {
7974 return internal_sync_runtime_canSpin(i)
7975 }
7976
7977
7978
7979
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989 func sync_runtime_doSpin() {
7990 internal_sync_runtime_doSpin()
7991 }
7992
7993 var stealOrder randomOrder
7994
7995
7996
7997
7998
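// randomOrder/randomEnum enumerate all Ps in a pseudo-random order without
// repetition, for work stealing. They rely on the fact that if inc is
// coprime with count, then pos, pos+inc, pos+2*inc, ... (mod count) visits
// every value in [0, count) exactly once.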
7999 type randomOrder struct {
8000 count uint32
8001 coprimes []uint32
8002 }
8003
8004 type randomEnum struct {
8005 i uint32
8006 count uint32
8007 pos uint32
8008 inc uint32
8009 }
8010
8011 func (ord *randomOrder) reset(count uint32) {
8012 ord.count = count
8013 ord.coprimes = ord.coprimes[:0]
8014 for i := uint32(1); i <= count; i++ {
8015 if gcd(i, count) == 1 {
8016 ord.coprimes = append(ord.coprimes, i)
8017 }
8018 }
8019 }
8020
8021 func (ord *randomOrder) start(i uint32) randomEnum {
8022 return randomEnum{
8023 count: ord.count,
8024 pos: i % ord.count,
8025 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
8026 }
8027 }
8028
8029 func (enum *randomEnum) done() bool {
8030 return enum.i == enum.count
8031 }
8032
8033 func (enum *randomEnum) next() {
8034 enum.i++
8035 enum.pos = (enum.pos + enum.inc) % enum.count
8036 }
8037
8038 func (enum *randomEnum) position() uint32 {
8039 return enum.pos
8040 }
8041
8042 func gcd(a, b uint32) uint32 {
8043 for b != 0 {
8044 a, b = b, a%b
8045 }
8046 return a
8047 }
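// A stand-alone check of the coprime-stride property used by randomOrder
// (illustrative code, not part of the runtime; assumes count > 0):
//
//	func visit(count, start, inc uint32) []uint32 {
//		order := make([]uint32, 0, count)
//		pos := start % count
//		for i := uint32(0); i < count; i++ {
//			order = append(order, pos)
//			pos = (pos + inc) % count
//		}
//		return order // a permutation of 0..count-1 whenever gcd(inc, count) == 1
//	}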
8048
8049
8050
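// An initTask describes the init functions of one package: state tracks
// progress (0 = not started, 1 = running, 2 = done), nfns is the number of
// init functions, and the function pointers themselves are laid out inline
// immediately after this header (see doInit1).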
8051 type initTask struct {
8052 state uint32
8053 nfns uint32
8054
8055 }
8056
8057
8058
8059 var inittrace tracestat
8060
8061 type tracestat struct {
8062 active bool
8063 id uint64
8064 allocs uint64
8065 bytes uint64
8066 }
8067
8068 func doInit(ts []*initTask) {
8069 for _, t := range ts {
8070 doInit1(t)
8071 }
8072 }
8073
8074 func doInit1(t *initTask) {
8075 switch t.state {
8076 case 2:
8077 return
8078 case 1:
8079 throw("recursive call during initialization - linker skew")
8080 default:
8081 t.state = 1
8082
8083 var (
8084 start int64
8085 before tracestat
8086 )
8087
8088 if inittrace.active {
8089 start = nanotime()
8090
8091 before = inittrace
8092 }
8093
8094 if t.nfns == 0 {
8095
8096 throw("inittask with no functions")
8097 }
8098
8099 firstFunc := add(unsafe.Pointer(t), 8)
8100 for i := uint32(0); i < t.nfns; i++ {
8101 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
8102 f := *(*func())(unsafe.Pointer(&p))
8103 f()
8104 }
8105
8106 if inittrace.active {
8107 end := nanotime()
8108
8109 after := inittrace
8110
8111 f := *(*func())(unsafe.Pointer(&firstFunc))
8112 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
8113
8114 var sbuf [24]byte
8115 print("init ", pkg, " @")
8116 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
8117 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
8118 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
8119 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
8120 print("\n")
8121 }
8122
8123 t.state = 2
8124 }
8125 }
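// The init timing and allocation output printed above is enabled with the
// inittrace GODEBUG setting, for example:
//
//	GODEBUG=inittrace=1 ./prog
//
// which emits one "init <pkg> @<start> ms, <clock> ms clock, <bytes> bytes,
// <allocs> allocs" line per package init.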
8126