Source file
src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "runtime/internal/sys"
14 "unsafe"
15 )
16
17
// modinfo holds module build information as a string blob; schedinit
// clears it when it contains only a 1-byte sentinel.
var modinfo string
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
// Bootstrap state for the very first thread and goroutine, plus
// race-detector bookkeeping used during initialization and shutdown.
var (
	m0           m       // the first M (main OS thread)
	g0           g       // m0's system-stack goroutine
	mcache0      *mcache // bootstrap mcache, before Ps exist
	raceprocctx0 uintptr // race detector context for the bootstrap proc
	raceFiniLock mutex   // serializes race-detector finalization
)
121
122
123
// runtime_inittasks lists the runtime package's own init tasks,
// run by main before any user package inits.
var runtime_inittasks []*initTask

// main_init_done is closed by main once all package init functions
// have completed; cgo code can use it to wait for init.
var main_init_done chan bool

// main_main is the user program's main.main; it has no Go body here
// and is provided by the toolchain.
func main_main()

// mainStarted reports that the main goroutine has started running;
// set in main before sysmon is launched.
var mainStarted bool

// runtimeInitTime is the nanotime() captured in main before running
// init tasks; used as the timestamp base for init tracing.
var runtimeInitTime int64

// initSigmask is the signal mask saved in schedinit from the
// initial M's sigmask.
var initSigmask sigset
143
144
// main is the main goroutine. It finishes runtime initialization
// (stack limits, sysmon, init tasks, cgo hookup), calls the user's
// main.main, and exits the process.
func main() {
	mp := getg().m

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	mp.g0.racectx = 0

	// Maximum goroutine stack size: 1 GB on 64-bit, 250 MB on 32-bit.
	if goarch.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Upper bound used where the regular limit check could overflow.
	maxstackceiling = 2 * maxstacksize

	// Allow newproc to start new Ms.
	mainStarted = true

	if haveSysmon {
		systemstack(func() {
			newm(sysmon, nil, -1)
		})
	}

	// Lock the main goroutine onto this, the main OS thread, during
	// initialization; some programs require main.main to run there.
	lockOSThread()

	if mp != &m0 {
		throw("runtime.main not on m0")
	}

	// Record when the runtime started; a zero nanotime is treated as
	// a broken clock.
	runtimeInitTime = nanotime()
	if runtimeInitTime == 0 {
		throw("nanotime returning zero")
	}

	if debug.inittrace != 0 {
		inittrace.id = getg().goid
		inittrace.active = true
	}

	doInit(runtime_inittasks)

	// Defer the unlock so that a Goexit during init still unlocks.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		// Sanity-check that all required cgo support symbols were linked in.
		if _cgo_pthread_key_created == nil {
			throw("_cgo_pthread_key_created missing")
		}
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}

		// Point the C-side crosscall2 function pointer at crosscall2.
		if set_crosscall2 == nil {
			throw("set_crosscall2 missing")
		}
		set_crosscall2()

		// Start the template thread before notifying C init is done,
		// so C-created threads can obtain Go-capable threads.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the init tasks of every linked-in module.
	for m := &firstmoduledata; m != nil; m = m.next {
		doInit(m.inittasks)
	}

	// Disable init tracing once user init is done.
	inittrace.active = false

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// Built as a library/archive: initialization is done, but the
		// program does not run main.main.
		return
	}
	// Call through a local to make the call to main.main indirect.
	fn := main_main
	fn()
	if raceenabled {
		runExitHooks(0) // run hooks now; racefini does not return
		racefini()
	}

	// If another goroutine is panicking, give its deferred functions
	// and panic printing a chance to finish before exiting.
	if runningPanicDefers.Load() != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if runningPanicDefers.Load() == 0 {
				break
			}
			Gosched()
		}
	}
	if panicking.Load() != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
	}
	runExitHooks(0)

	exit(0)
	// Unreachable: crash hard if exit somehow returns.
	for {
		var x *int32
		*x = 0
	}
}
300
301
302
303
304 func os_beforeExit(exitCode int) {
305 runExitHooks(exitCode)
306 if exitCode == 0 && raceenabled {
307 racefini()
308 }
309 }
310
311
// Start the forced-GC helper goroutine during package init.
func init() {
	go forcegchelper()
}
315
// forcegchelper loops forever, parking until it is woken to force a
// garbage collection, then starting a time-triggered GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	lockInit(&forcegc.lock, lockRankForcegc)
	for {
		lock(&forcegc.lock)
		if forcegc.idle.Load() {
			throw("forcegc: phase error")
		}
		forcegc.idle.Store(true)
		// Park holding forcegc.lock; the waker clears idle and readies us.
		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)

		if debug.gctrace > 0 {
			println("GC forced")
		}

		// Time-triggered, fully concurrent GC.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}
334
335
336
337
338
// Gosched yields the processor, allowing other goroutines to run. It
// does not suspend the current goroutine, which resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
343
344
345
346
347
// goschedguarded yields the processor like Gosched, but the switch is
// performed by goschedguarded_m, which can refuse on forbidden states.
func goschedguarded() {
	mcall(goschedguarded_m)
}
351
352
353
354
355
356
// goschedIfBusy yields the processor, but only if there are no idle Ps
// or the caller's goroutine has a pending preemption request; otherwise
// it returns immediately to avoid an unnecessary reschedule.
func goschedIfBusy() {
	gp := getg()
	// Be kind to the preemption request even if Ps are idle.
	if !gp.preempt && sched.npidle.Load() > 0 {
		return
	}
	mcall(gosched_m)
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
// gopark puts the current goroutine into a waiting state and switches
// to the scheduler. If unlockf is non-nil, park_m calls it with lock;
// if it returns false the goroutine is resumed immediately.
// The goroutine stays parked until goready(gp) is called.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	// Stash the park parameters on the M for park_m to consume.
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waitTraceBlockReason = traceReason
	mp.waitTraceSkip = traceskip
	releasem(mp)
	// Switch to g0 and complete the park there.
	mcall(park_m)
}
403
404
405
// goparkunlock parks the current goroutine and releases lock via
// parkunlock_c once the park is committed.
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
}
409
// goready marks gp runnable again after a gopark, running ready on the
// system stack.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
415
416
// acquireSudog returns a sudog from the per-P cache, refilling from the
// central cache or allocating a new one as needed.
//
// acquirem/releasem bracket the allocation: keeping m.locks nonzero
// while calling new(sudog) prevents this M from being rescheduled, so
// pp remains stable (and, per the original design, keeps GC from being
// invoked mid-operation — NOTE(review): confirm against malloc rules).
func acquireSudog() *sudog {
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache, filling
		// the local cache to half capacity.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache was empty too, allocate a fresh sudog.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop from the tail, clearing the slot so the cache holds no
	// stale pointer.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}
453
454
// releaseSudog returns s to the per-P cache, first verifying that every
// link and payload field has been cleared by the caller. When the local
// cache is full, half of it is transferred to the central cache.
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache as a
		// singly linked list, building the list before taking the lock.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
503
504
// badmcall is called when mcall is invoked while already on the g0 stack.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

// badmcall2 is called if an mcall'd function returns, which is not allowed.
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

// badreflectcall panics when a reflect call's argument frame is too large.
func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}
516
517
518
// badmorestackg0 reports a stack overflow on g0. Where supported it
// switches to the dedicated crash stack to print a traceback; otherwise
// it can only emit a fixed message.
func badmorestackg0() {
	if !crashStackImplemented {
		writeErrStr("fatal: morestack on g0\n")
		return
	}

	g := getg()
	switchToCrashStack(func() {
		print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
		g.m.traceback = 2 // include system goroutines and frame pointers in traceback
		traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
		print("\n")

		throw("morestack on g0")
	})
}
535
536
537
// badmorestackgsignal reports a stack overflow on the signal-handling
// goroutine; no recovery is possible, so only a message is written.
func badmorestackgsignal() {
	writeErrStr("fatal: morestack on gsignal\n")
}

// badctxt is called when a nonzero ctxt is seen where none is allowed.
func badctxt() {
	throw("ctxt != 0")
}
546
547
548
// gcrash is a fake g whose stack (allocated in schedinit) serves as the
// crash stack for switchToCrashStack.
var gcrash g

// crashingG records the single g allowed onto the crash stack; others
// spin or abort (see switchToCrashStack).
var crashingG atomic.Pointer[g]
552
553
554
555
556
557
558
559
560
// switchToCrashStack switches to the dedicated crash stack and calls fn.
// Only one goroutine may ever win the crash stack; a recursive entry by
// the same goroutine aborts, and concurrent entrants spin briefly before
// aborting so the winner can finish printing.
func switchToCrashStack(fn func()) {
	me := getg()
	if crashingG.CompareAndSwapNoWB(nil, me) {
		switchToCrashStack0(fn) // should never return
		abort()
	}
	if crashingG.Load() == me {
		// Recursive crash on the crash stack itself.
		writeErrStr("fatal: recursive switchToCrashStack\n")
		abort()
	}
	// Another goroutine holds the crash stack; give it time, then abort.
	usleep_no_g(100)
	writeErrStr("fatal: concurrent switchToCrashStack\n")
	abort()
}
577
578
579
580
// crashStackImplemented reports whether this GOARCH/GOOS combination
// provides switchToCrashStack0 (not implemented on Windows).
const crashStackImplemented = (GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x" || GOARCH == "wasm") && GOOS != "windows"

// switchToCrashStack0 switches to the crash stack and calls fn.
// Declared without a Go body; implemented outside Go (assembly).
func switchToCrashStack0(fn func())
585
586 func lockedOSThread() bool {
587 gp := getg()
588 return gp.lockedm != 0 && gp.m.lockedg != 0
589 }
590
// Bookkeeping for the set of all goroutines.
var (
	// allglock protects mutation of allgs; readers may instead use the
	// lock-free snapshot published via allgptr/allglen.
	allglock mutex
	allgs    []*g

	// allglen and allgptr are an atomically published copy of
	// len(allgs) and &allgs[0]. The pointer is updated before the
	// length (see allgadd), so concurrent readers see a length no
	// larger than the array the pointer refers to.
	allglen uintptr
	allgptr **g
)
616
// allgadd appends gp to allgs under allglock and republishes the
// atomic snapshot (pointer first, then length).
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	if &allgs[0] != allgptr {
		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
	}
	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
	unlock(&allglock)
}
630
631
632
633
// allGsSnapshot returns a snapshot of the allgs slice. The world must be
// stopped or allglock held, so the contents are stable at that moment;
// the three-index slice prevents appends through the returned value
// from clobbering allgs' backing array.
func allGsSnapshot() []*g {
	assertWorldStoppedOrLockHeld(&allglock)

	return allgs[:len(allgs):len(allgs)]
}
644
645
// atomicAllG returns the lock-free snapshot (&allgs[0], len(allgs)).
// Length is loaded before the pointer, matching allgadd's store order.
func atomicAllG() (**g, uintptr) {
	length := atomic.Loaduintptr(&allglen)
	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
	return ptr, length
}

// atomicAllGIndex returns ptr[i] for a snapshot from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
656
657
658
659
// forEachG calls fn on every g, holding allglock for the duration so no
// goroutine is added concurrently.
func forEachG(fn func(gp *g)) {
	lock(&allglock)
	for _, gp := range allgs {
		fn(gp)
	}
	unlock(&allglock)
}
667
668
669
670
671
672 func forEachGRace(fn func(gp *g)) {
673 ptr, length := atomicAllG()
674 for i := uintptr(0); i < length; i++ {
675 gp := atomicAllGIndex(ptr, i)
676 fn(gp)
677 }
678 return
679 }
680
const (
	// _GoidCacheBatch is the number of goroutine IDs a P reserves from
	// the global counter at a time.
	_GoidCacheBatch = 16
)
686
687
688
// cpuinit sets up CPU feature detection (honoring GODEBUG-style options
// in env on OSes where that is safe) and mirrors a few hot feature
// flags into runtime package variables for fast access.
func cpuinit(env string) {
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true
	}
	cpu.Initialize(env)

	// Copy the flags the runtime itself consults out of the cpu package.
	switch GOARCH {
	case "386", "amd64":
		x86HasPOPCNT = cpu.X86.HasPOPCNT
		x86HasSSE41 = cpu.X86.HasSSE41
		x86HasFMA = cpu.X86.HasFMA

	case "arm":
		armHasVFPv4 = cpu.ARM.HasVFPv4

	case "arm64":
		arm64HasATOMICS = cpu.ARM64.HasATOMICS
	}
}
711
712
713
714
// getGodebugEarly extracts GODEBUG from the process environment before
// the normal environment machinery is available. On the listed Unix
// systems the environment strings sit directly after argv (argv_index
// beyond argc), terminated by a nil pointer.
func getGodebugEarly() string {
	const prefix = "GODEBUG="
	var env string
	switch GOOS {
	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		// Count the environment strings following argv.
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := unsafe.String(p, findnull(p))

			if hasPrefix(s, prefix) {
				// gostring copies the value into Go-managed memory.
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}
	return env
}
740
741
742
743
744
745
746
747
748
// schedinit performs one-time scheduler and runtime bootstrap: lock
// ranks, memory allocator, module metadata, GODEBUG parsing, signal
// mask capture, and the initial procresize. The sequence is
// order-sensitive; do not reorder calls casually.
func schedinit() {
	lockInit(&sched.lock, lockRankSched)
	lockInit(&sched.sysmonlock, lockRankSysmon)
	lockInit(&sched.deferlock, lockRankDefer)
	lockInit(&sched.sudoglock, lockRankSudog)
	lockInit(&deadlock, lockRankDeadlock)
	lockInit(&paniclk, lockRankPanic)
	lockInit(&allglock, lockRankAllg)
	lockInit(&allpLock, lockRankAllp)
	lockInit(&reflectOffs.lock, lockRankReflectOffs)
	lockInit(&finlock, lockRankFin)
	lockInit(&cpuprof.lock, lockRankCpuprof)
	allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
	execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
	traceLockInit()
	// Initialized as a leaf lock: nothing may be acquired while it is held.
	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)

	// The world starts stopped; raceinit must be the first race call.
	gp := getg()
	if raceenabled {
		gp.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	crashFD.Store(^uintptr(0)) // "no crash FD" sentinel

	worldStopped()

	ticks.init()
	moduledataverify()
	stackinit()
	mallocinit()
	godebug := getGodebugEarly()
	initPageTrace(godebug) // must run after mallocinit but before anything allocates
	cpuinit(godebug)       // must run before alginit
	randinit()             // must run before alginit, mcommoninit
	alginit()              // maps, hash, rand must not be used before this call
	mcommoninit(gp.m, -1)
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules
	stkobjinit()    // must run before GC starts

	sigsave(&gp.m.sigmask)
	initSigmask = gp.m.sigmask

	goargs()
	goenvs()
	secure()
	checkfds()
	parsedebugvars()
	gcinit()

	// Allocate the crash stack; the unusual +1000 guards mark it as a
	// special stack (NOTE(review): confirm intent of the 1000 offset).
	gcrash.stack = stackalloc(16384)
	gcrash.stackguard0 = gcrash.stack.lo + 1000
	gcrash.stackguard1 = gcrash.stack.lo + 1000

	if disableMemoryProfiling {
		MemProfileRate = 0
	}

	lock(&sched.lock)
	sched.lastpoll.Store(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}
	unlock(&sched.lock)

	// World is effectively started: Ps and Ms exist.
	worldStarted()

	if buildVersion == "" {
		// Condition should never trigger; keeps buildVersion live.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger; keeps modinfo live.
		modinfo = ""
	}
}
846
// dumpgstatus prints diagnostic state for gp and the current g.
func dumpgstatus(gp *g) {
	thisg := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
852
853
// checkmcount throws if the number of OS threads the program itself
// created exceeds sched.maxmcount. Extra Ms (created for C callbacks)
// are excluded from the count. sched.lock must be held.
func checkmcount() {
	assertLockHeld(&sched.lock)

	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
	if count > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}
871
872
873
874
875
// mReserveID allocates the next unique M ID and enforces the thread
// limit. sched.lock must be held.
func mReserveID() int64 {
	assertLockHeld(&sched.lock)

	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	id := sched.mnext
	sched.mnext++
	checkmcount()
	return id
}
887
888
// mcommoninit performs OS-independent initialization of mp: ID
// assignment, random state, signal-stack preparation, and linking mp
// into allm. If id is negative, an ID is reserved via mReserveID.
func mcommoninit(mp *m, id int64) {
	gp := getg()

	// g0 stack won't make sense for the user (not a call stack).
	if gp != gp.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)

	if id >= 0 {
		mp.id = id
	} else {
		mp.id = mReserveID()
	}

	mrandinit(mp)

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
	}

	// Publish mp at the head of allm. The atomic store makes the new
	// node visible to traversals that run without sched.lock.
	mp.alllink = allm
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate cgo traceback memory where cgo calls can occur.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}
926
// becomeSpinning marks mp as spinning (looking for work), bumping the
// global spinning count and clearing the need-spinning request.
func (mp *m) becomeSpinning() {
	mp.spinning = true
	sched.nmspinning.Add(1)
	sched.needspinning.Store(0)
}
932
933 func (mp *m) hasCgoOnStack() bool {
934 return mp.ncgo > 0 || mp.isextra
935 }
936
const (
	// osHasLowResTimer is true on platforms whose sleep/timer
	// primitives have coarse granularity.
	osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"

	// osHasLowResClockInt is osHasLowResClock as an int, usable in
	// constant arithmetic.
	osHasLowResClockInt = goos.IsWindows

	// osHasLowResClock is true on platforms whose time-reading
	// primitives have coarse granularity.
	osHasLowResClock = osHasLowResClockInt > 0
)
950
951
// ready marks gp (which must be in _Gwaiting, possibly with the scan
// bit) runnable and queues it on the current P's run queue.
func ready(gp *g, traceskip int, next bool) {
	status := readgstatus(gp)

	// Mark runnable.
	mp := acquirem() // disable preemption while we hold a P in a local
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// Transition to Grunnable and enqueue; emit trace event if tracing.
	trace := traceAcquire()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.ok() {
		trace.GoUnpark(gp, traceskip)
		traceRelease(trace)
	}
	runqput(mp.p.ptr(), gp, next)
	wakep()
	releasem(mp)
}
973
974
975
// freezeStopWait is a large sentinel for sched.stopwait meaning the
// world is being frozen for a crash rather than a normal STW.
const freezeStopWait = 0x7fffffff

// freezing is set when the world is being frozen to print a crash
// report; consulted by stopTheWorldWithSema to avoid a double throw.
var freezing atomic.Bool
981
982
983
984
// freezetheworld makes a best-effort attempt to stop all goroutines so
// a crash report can be printed. Unlike a normal STW it takes no locks
// and tolerates an inconsistent scheduler, so it only retries
// preemption a bounded number of times.
func freezetheworld() {
	freezing.Store(true)
	if debug.dontfreezetheworld > 0 {
		// With dontfreezetheworld set, leave the scheduler running and
		// merely give running goroutines a moment to reach a safepoint.
		usleep(1000)
		return
	}

	// Retry a few times: stopwait/gcwaiting tell the scheduler not to
	// start anything new, preemptall asks running goroutines to stop.
	for i := 0; i < 5; i++ {
		// this should tell the scheduler not to start any new goroutines
		sched.stopwait = freezeStopWait
		sched.gcwaiting.Store(true)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}
1034
1035
1036
1037
1038
// readgstatus atomically loads gp's status word. All status reads must
// go through this helper.
func readgstatus(gp *g) uint32 {
	return gp.atomicstatus.Load()
}
1042
1043
1044
1045
1046
// casfrom_Gscanstatus transitions gp out of a _Gscan* state to the
// matching non-scan state (newval must equal oldval with the scan bit
// cleared), throwing on any other transition.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that the transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	releaseLockRankAndM(lockRankGscan)
}
1072
1073
1074
// castogscanstatus attempts to set the scan bit on gp's status (newval
// must equal oldval with the scan bit added). It reports whether the
// CAS succeeded, acquiring the Gscan lock rank on success, and throws
// on an invalid transition.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
			if r {
				acquireLockRankAndM(lockRankGscan)
			}
			return r
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}
1094
1095
1096
1097 var casgstatusAlwaysTrack = false
1098
1099
1100
1101
1102
1103
1104
// casgstatus transitions gp from oldval to newval, spinning until any
// concurrent scan (which holds the status in a _Gscan* value) finishes.
// Scan-bit transitions must use castogscanstatus/casfrom_Gscanstatus
// instead. It also maintains the goroutine scheduling-latency and
// mutex-wait tracking state.
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			// Call on the systemstack to prevent print and throw from
			// counting against the nosplit stack reservation.
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	lockWithRankMayAcquire(nil, lockRankGscan)

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// Loop if gp->atomicstatus is in a scan state, giving the GC time
	// to finish.
	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
			systemstack(func() {
				// Call on the systemstack to avoid the nosplit overflow.
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	if oldval == _Grunning {
		// Track every gTrackingPeriod'th time a goroutine transitions
		// out of running (or always, if requested).
		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
			gp.tracking = true
		}
		gp.trackingSeq++
	}
	if !gp.tracking {
		return
	}

	// Timestamp accounting: close out the interval for the state we
	// are leaving, then open one for the state we are entering.
	switch oldval {
	case _Grunnable:
		// We transitioned out of runnable, so measure how long we
		// were in this state.
		now := nanotime()
		gp.runnableTime += now - gp.trackingStamp
		gp.trackingStamp = 0
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		// Blocking on a lock: scale the sampled wait time by the
		// tracking period to approximate the total.
		now := nanotime()
		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
		gp.trackingStamp = 0
	}
	switch newval {
	case _Gwaiting:
		if !gp.waitreason.isMutexWait() {
			// Not blocking on a lock.
			break
		}
		now := nanotime()
		gp.trackingStamp = now
	case _Grunnable:
		// We just transitioned into runnable, so record what time
		// that happened.
		now := nanotime()
		gp.trackingStamp = now
	case _Grunning:
		// We're transitioning into running, so turn off tracking and
		// record how much time we spent in runnable.
		gp.tracking = false
		sched.timeToRun.record(gp.runnableTime)
		gp.runnableTime = 0
	}
}
1205
1206
1207
1208
// casGToWaiting transitions gp from old to _Gwaiting, setting the wait
// reason first so it is in place before the status changes.
func casGToWaiting(gp *g, old uint32, reason waitReason) {
	// The wait reason must be set before the transition.
	gp.waitreason = reason
	casgstatus(gp, old, _Gwaiting)
}

// casGToWaitingForGC is casGToWaiting restricted to wait reasons that
// indicate waiting for (or on behalf of) the GC; it throws otherwise.
func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
	if !reason.isWaitingForGC() {
		throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
	}
	casGToWaiting(gp, old, reason)
}
1225
1226
1227
1228
1229
1230
1231
1232
// casgcopystack transitions gp from Gwaiting or Grunnable to
// _Gcopystack for stack copying, retrying until the CAS wins (the
// status may flip between the two legal states concurrently).
// Returns the old status (scan bit stripped).
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}
1244
1245
1246
1247
1248
// casGToPreemptScan transitions gp from _Grunning to
// _Gscan|_Gpreempted, spinning until the CAS succeeds. Only this exact
// transition is permitted.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	acquireLockRankAndM(lockRankGscan)
	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
	}
}
1257
1258
1259
1260
// casGFromPreempted attempts the _Gpreempted -> _Gwaiting transition,
// setting waitReasonPreempted first; it reports whether the CAS won
// (it may lose to a concurrent scan taking ownership of the g).
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	gp.waitreason = waitReasonPreempted
	return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
}
1268
1269
// stwReason is an enumeration of the reasons the world can be stopped.
type stwReason uint8

// Reasons to stop-the-world. Keep in sync with stwReasonStrings.
const (
	stwUnknown                     stwReason = iota // "unknown"
	stwGCMarkTerm                                   // "GC mark termination"
	stwGCSweepTerm                                  // "GC sweep termination"
	stwWriteHeapDump                                // "write heap dump"
	stwGoroutineProfile                             // "goroutine profile"
	stwGoroutineProfileCleanup                      // "goroutine profile cleanup"
	stwAllGoroutinesStack                           // "all goroutines stack trace"
	stwReadMemStats                                 // "read mem stats"
	stwAllThreadsSyscall                            // "AllThreadsSyscall"
	stwGOMAXPROCS                                   // "GOMAXPROCS"
	stwStartTrace                                   // "start trace"
	stwStopTrace                                    // "stop trace"
	stwForTestCountPagesInUse                       // "CountPagesInUse (test)"
	stwForTestReadMetricsSlow                       // "ReadMetricsSlow (test)"
	stwForTestReadMemStatsSlow                      // "ReadMemStatsSlow (test)"
	stwForTestPageCachePagesLeaked                  // "PageCachePagesLeaked (test)"
	stwForTestResetDebugLog                         // "ResetDebugLog (test)"
)
1294
// String returns the human-readable name of the STW reason.
func (r stwReason) String() string {
	return stwReasonStrings[r]
}
1298
1299 func (r stwReason) isGC() bool {
1300 return r == stwGCMarkTerm || r == stwGCSweepTerm
1301 }
1302
1303
1304
1305
// stwReasonStrings maps each stwReason to its display name; indexed by
// the reason value, so it must stay in sync with the const block.
var stwReasonStrings = [...]string{
	stwUnknown:                     "unknown",
	stwGCMarkTerm:                  "GC mark termination",
	stwGCSweepTerm:                 "GC sweep termination",
	stwWriteHeapDump:               "write heap dump",
	stwGoroutineProfile:            "goroutine profile",
	stwGoroutineProfileCleanup:     "goroutine profile cleanup",
	stwAllGoroutinesStack:          "all goroutines stack trace",
	stwReadMemStats:                "read mem stats",
	stwAllThreadsSyscall:           "AllThreadsSyscall",
	stwGOMAXPROCS:                  "GOMAXPROCS",
	stwStartTrace:                  "start trace",
	stwStopTrace:                   "stop trace",
	stwForTestCountPagesInUse:      "CountPagesInUse (test)",
	stwForTestReadMetricsSlow:      "ReadMetricsSlow (test)",
	stwForTestReadMemStatsSlow:     "ReadMemStatsSlow (test)",
	stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
	stwForTestResetDebugLog:        "ResetDebugLog (test)",
}
1325
1326
1327
// worldStop describes one stop-the-world: why, when stopping began and
// finished, and the CPU time spent getting Ps to stop.
type worldStop struct {
	reason           stwReason
	startedStopping  int64 // nanotime when stopping began
	finishedStopping int64 // nanotime when all Ps had stopped
	stoppingCPUTime  int64 // summed per-P time spent stopping
}

// stopTheWorldContext carries the worldStop from the systemstack
// closure in stopTheWorld back to its caller (worldsema serializes use).
var stopTheWorldContext worldStop
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
// stopTheWorld stops all Ps and returns a worldStop describing the
// stop; pass it to startTheWorld to resume. The caller holds worldsema
// on return, so nothing else can stop the world until startTheWorld.
func stopTheWorld(reason stwReason) worldStop {
	semacquire(&worldsema)
	gp := getg()
	gp.m.preemptoff = reason.String()
	systemstack(func() {
		// Mark this goroutine waiting-for-GC so a concurrent GC's stack
		// scan treats it as stopped at a safepoint while it blocks in
		// stopTheWorldWithSema.
		casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
		stopTheWorldContext = stopTheWorldWithSema(reason)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
	return stopTheWorldContext
}
1382
1383
1384
1385
// startTheWorld undoes the effects of stopTheWorld(w) and releases
// worldsema. Preemption stays disabled (acquirem) until both
// preemptoff is cleared and the semaphore is released, keeping the
// two in sync.
func startTheWorld(w worldStop) {
	systemstack(func() { startTheWorldWithSema(0, w) })

	mp := acquirem()
	mp.preemptoff = ""
	semrelease1(&worldsema, true, 0)
	releasem(mp)
}
1409
1410
1411
1412
// stopTheWorldGC is stopTheWorld plus gcsema, which additionally
// blocks a concurrent GC from starting while the world is stopped.
func stopTheWorldGC(reason stwReason) worldStop {
	semacquire(&gcsema)
	return stopTheWorld(reason)
}

// startTheWorldGC undoes stopTheWorldGC(w): restart the world, then
// allow GCs again.
func startTheWorldGC(w worldStop) {
	startTheWorld(w)
	semrelease(&gcsema)
}
1425
1426
// worldsema is held by whoever currently has the world stopped
// (binary semaphore, 1 = available).
var worldsema uint32 = 1

// gcsema, when held together with worldsema (see stopTheWorldGC),
// also keeps a new GC cycle from starting.
var gcsema uint32 = 1
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
// stopTheWorldWithSema is the core STW implementation: it asks every P
// to stop (via preemption, syscall-P capture, and the idle list), then
// waits on stopnote until all have. Caller must hold worldsema and run
// on the system stack; the returned worldStop must be passed to
// startTheWorldWithSema.
func stopTheWorldWithSema(reason stwReason) worldStop {
	trace := traceAcquire()
	if trace.ok() {
		trace.STWStart(reason)
		traceRelease(trace)
	}
	gp := getg()

	// Holding locks here would deadlock: a stopped M could hold the
	// lock we wait for.
	if gp.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	start := nanotime() // exclude time waiting for sched.lock from start and total time metrics
	sched.stopwait = gomaxprocs
	sched.gcwaiting.Store(true)
	preemptall()
	// Stop the current P.
	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	gp.m.p.ptr().gcStopTime = start
	sched.stopwait--
	// Try to retake all Ps blocked in syscalls.
	trace = traceAcquire()
	for _, pp := range allp {
		s := pp.status
		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
			if trace.ok() {
				trace.ProcSteal(pp, false)
			}
			pp.syscalltick++
			pp.gcStopTime = nanotime()
			sched.stopwait--
		}
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Stop idle Ps.
	now := nanotime()
	for {
		pp, _ := pidleget(now)
		if pp == nil {
			break
		}
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// Wait for remaining Ps to stop voluntarily, re-preempting every
	// 100us in case a preemption request was missed.
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	finish := nanotime()
	startTime := finish - start
	if reason.isGC() {
		sched.stwStoppingTimeGC.record(startTime)
	} else {
		sched.stwStoppingTimeOther.record(startTime)
	}

	// Sanity-check that everything really stopped, accumulating the
	// per-P stop latency as we go.
	stoppingCPUTime := int64(0)
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, pp := range allp {
			if pp.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
			if pp.gcStopTime == 0 && bad == "" {
				bad = "stopTheWorld: broken CPU time accounting"
			}
			stoppingCPUTime += finish - pp.gcStopTime
			pp.gcStopTime = 0
		}
	}
	if freezing.Load() {
		// A concurrent freezetheworld (crash in progress) may leave
		// the checks above inconsistent; deadlock here instead of
		// throwing so the crash report can be printed.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}

	worldStopped()

	return worldStop{
		reason:           reason,
		startedStopping:  start,
		finishedStopping: finish,
		stoppingCPUTime:  stoppingCPUTime,
	}
}
1582
1583
1584
1585
1586
1587
1588
// startTheWorldWithSema restarts the world stopped by
// stopTheWorldWithSema: re-inject netpoll work, resize/hand out Ps,
// wake their Ms, and record STW duration metrics. now, if nonzero, is
// the restart timestamp; the (possibly refreshed) timestamp is
// returned. Must run on the system stack.
func startTheWorldWithSema(now int64, w worldStop) int64 {
	assertWorldStopped()

	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list, delta := netpoll(0) // non-blocking
		injectglist(&list)
		netpollAdjustWaiters(delta)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		// A pending GOMAXPROCS change takes effect now.
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting.Store(false)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	worldStarted()

	// Hand each P with local work to an M: wake its parked M or make a
	// new one.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p, -1)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	if now == 0 {
		now = nanotime()
	}
	totalTime := now - w.startedStopping
	if w.reason.isGC() {
		sched.stwTotalTimeGC.record(totalTime)
	} else {
		sched.stwTotalTimeOther.record(totalTime)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.STWDone()
		traceRelease(trace)
	}

	// Wake an additional proc in case there is runnable work that no P
	// picked up above.
	wakep()

	releasem(mp)

	return now
}
1657
1658
1659
1660 func usesLibcall() bool {
1661 switch GOOS {
1662 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1663 return true
1664 case "openbsd":
1665 return GOARCH != "mips64"
1666 }
1667 return false
1668 }
1669
1670
1671
1672 func mStackIsSystemAllocated() bool {
1673 switch GOOS {
1674 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1675 return true
1676 case "openbsd":
1677 return GOARCH != "mips64"
1678 }
1679 return false
1680 }
1681
1682
1683
// mstart is the entry point for new Ms. Declared without a Go body;
// implemented outside Go (assembly); it calls mstart0.
func mstart()
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
// mstart0 is the Go entry point for new Ms. It establishes stack
// bounds for OS-allocated g0 stacks, runs mstart1, and finally exits
// the thread via mexit. It may not split the stack until the bounds
// are set.
func mstart0() {
	gp := getg()

	osStack := gp.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack. Cgo may have left
		// stack.hi pointing at the stack's declared size; otherwise
		// fall back to a conservative guess and anchor hi at the
		// address of a local (&size), which is near the stack top.
		size := gp.stack.hi
		if size == 0 {
			size = 16384 * sys.StackGuardMultiplier
		}
		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		// The +1024 leaves slack for frames already on the OS stack
		// below the measured top (NOTE(review): confirm).
		gp.stack.lo = gp.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	gp.stackguard0 = gp.stack.lo + stackGuard
	// This is the g0, so we can also call go:systemstack functions,
	// which check stackguard1.
	gp.stackguard1 = gp.stackguard0
	mstart1()

	// Exit this thread.
	if mStackIsSystemAllocated() {
		// The OS owns this stack; mexit must not free it.
		osStack = true
	}
	mexit(osStack)
}
1732
1733
1734
1735
1736
// mstart1 finishes per-M initialization on g0 (asminit/minit, signal
// setup for m0, the m's start function, P acquisition) and enters the
// scheduler. It never returns.
func mstart1() {
	gp := getg()

	if gp != gp.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record the caller as g0's resume point, so that goexit0 and
	// reentry via the scheduler land just after the mstart1 call in
	// mstart0 (NOTE(review): relies on mstart0 not changing its frame).
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.sched.pc = getcallerpc()
	gp.sched.sp = getcallersp()

	asminit()
	minit()

	// Install signal handlers; after minit so the handler can take
	// signals.
	if gp.m == &m0 {
		mstartm0()
	}

	if fn := gp.m.mstartfn; fn != nil {
		fn()
	}

	// m0 already has a P from schedinit; every other M picks up its
	// pre-assigned P here.
	if gp.m != &m0 {
		acquirep(gp.m.nextp.ptr())
		gp.m.nextp = 0
	}
	schedule()
}
1773
1774
1775
1776
1777
1778
1779
// mstartm0 is m0-only initialization: create an extra M for non-Go
// threads when cgo (or Windows) requires one, and install signal
// handlers.
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// Windows needs it even without cgo, for WER callbacks.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}
1790
1791
1792
1793
// mPark blocks the current M on its park note until woken, then clears
// the note for the next park.
func mPark() {
	gp := getg()
	notesleep(&gp.m.park)
	noteclear(&gp.m.park)
}
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
// mexit tears down the current M and exits its OS thread. m0 cannot be
// torn down (its resources are process-owned), so it hands off its P
// and parks forever instead. osStack reports whether the M's stack is
// OS-allocated and therefore must not be freed by the runtime.
func mexit(osStack bool) {
	mp := getg().m

	if mp == &m0 {
		// m0 cannot exit: release its P and park this thread forever,
		// still counting it as freed for checkdead purposes.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		mPark()
		throw("locked m0 woke up")
	}

	sigblock(true)
	unminit()

	// Free the gsignal stack; mp keeps running briefly without it, so
	// no signals may be taken past this point.
	if mp.gsignal != nil {
		stackfree(mp.gsignal.stack)
		mp.gsignal = nil
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == mp {
			*pprev = mp.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	// Events must happen in this order, still under sched.lock:
	// mark mp awaiting free, then put it on sched.freem so its stack
	// is only reclaimed once the thread is truly gone.
	mp.freeWait.Store(freeMWait)
	mp.freelink = sched.freem
	sched.freem = mp
	unlock(&sched.lock)

	// Fold this M's counters into the global totals before it vanishes.
	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
	sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())

	// Release the P.
	handoffp(releasep())

	// After this point we must not have write barriers.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if GOOS == "darwin" || GOOS == "ios" {
		// A pending preempt signal can never be delivered now; drop it
		// from the global pending count.
		if mp.signalPending.Load() != 0 {
			pendingPreemptSignals.Add(-1)
		}
	}

	// Destroy all OS-specific resources associated with mp.
	mdestroy(mp)

	if osStack {
		// The OS owns the stack, so it is safe to drop the reference
		// now; the OS reclaims the stack when the thread exits.
		mp.freeWait.Store(freeMRef)
		return
	}

	// The runtime owns the stack: exit the thread through exitThread,
	// which signals freeWait when the stack is no longer in use so it
	// can be freed.
	exitThread(&mp.freeWait)
}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
// forEachP calls fn(p) for every P at a GC-safe point, marking the
// calling goroutine as waiting-for-GC for the duration so concurrent
// scans see it stopped.
func forEachP(reason waitReason, fn func(*p)) {
	systemstack(func() {
		gp := getg().m.curg
		// Mark the user goroutine as waiting so a concurrent GC can
		// scan its stack while forEachPInternal blocks; fn must not
		// touch gp's stack (it may run on other Ms concurrently).
		casGToWaitingForGC(gp, _Grunning, reason)
		forEachPInternal(fn)
		casgstatus(gp, _Gwaiting, _Grunning)
	})
}
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
// forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
// It is the internal implementation of forEachP.
//
// If a P is currently executing code, this will bring the P to a safe
// point and execute fn on that P (via preemption and runSafePointFn).
// If the P is idle or in a syscall, fn(p) is called directly here while
// holding sched.lock or after forcing the P out of _Psyscall.
func forEachPInternal(fn func(*p)) {
	mp := acquirem()
	pp := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	// Every P but ours must run fn (or have it run on its behalf).
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p2 := range allp {
		if p2 != pp {
			atomic.Store(&p2.runSafePointFn, 1)
		}
	}
	preemptall()

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(pp)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off so handoffp runs the safe-point function for them.
	for _, p2 := range allp {
		s := p2.status

		// We need to be fine-grained about tracing here, since handoffp
		// may call into the tracer, and the tracer is non-reentrant.
		trace := traceAcquire()
		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
			if trace.ok() {
				// traceRelease must happen before handoffp,
				// which may itself traceAcquire.
				trace.ProcSteal(p2, false)
				traceRelease(trace)
			}
			p2.syscalltick++
			handoffp(p2)
		} else if trace.ok() {
			traceRelease(trace)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p2 := range allp {
		if p2.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
// runSafePointFn runs the safe point function, if any, for this P.
// It must be checked on any transition into _Pidle or _Psyscall to
// avoid a race where forEachP sees that the P is running just before
// the P goes idle and neither party runs the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly: only the CAS winner runs it.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
2070
2071
2072
2073
// When running with cgo, we call _cgo_thread_start to start threads
// for us so that we can play nicely with foreign code.
var cgoThreadStart unsafe.Pointer

// cgothreadstart is the argument block passed to _cgo_thread_start
// (see newm1): the g to run, its TLS slot, and the entry function.
type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows pp.
func allocm(pp *p, fn func(), id int64) *m {
	allocmLock.rlock()

	// The caller owns pp, but we may borrow (i.e., acquirep) it. We must
	// disable preemption to ensure it is not stolen, which would make the
	// caller lose ownership.
	acquirem()

	gp := getg()
	if gp.m.p == 0 {
		// Temporarily borrow pp for allocations in this function.
		acquirep(pp)
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			// Ms still marked freeMWait are not yet done with their
			// stacks; keep them on the list for a later pass.
			wait := freem.freeWait.Load()
			if wait == freeMWait {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			// Free the trace state for this dead M. Ms can emit events
			// all the way until wait != freeMWait, so it's only safe to
			// call traceThreadDestroy at this point.
			if traceEnabled() || traceShuttingDown() {
				traceThreadDestroy(freem)
			}
			// Free the stack if needed. For freeMRef there is nothing
			// to do except drop freem from the sched.freem list.
			if wait == freeMStack {
				// stackfree must be on the system stack, but allocm is
				// reachable off the system stack (for example, via
				// startm).
				systemstack(func() {
					stackfree(freem.g0.stack)
				})
			}
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp, id)

	// In case of cgo, pthread_create will make us a stack (malg(-1)
	// means "no Go-allocated stack"). Otherwise allocate a g0 stack.
	if iscgo || mStackIsSystemAllocated() {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(16384 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if pp == gp.m.p.ptr() {
		// Return the borrowed P.
		releasep()
	}

	releasem(gp.m)
	allocmLock.runlock()
	return mp
}
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
// needm is called when a cgo callback happens on a thread without an m
// (a thread not created by Go). It grabs an m from the extra M list
// and initializes m and g so Go code can run on this thread.
//
// signal indicates whether we're called from a signal handler, which
// suppresses trace events here (they are picked up later instead).
func needm(signal bool) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor,
		// before any cgo call has set up the extra M list.
		//
		// Can not throw, because the scheduler is not initialized yet.
		writeErrStr("fatal error: cgo callback before cgo call\n")
		exit(1)
	}

	// Save and block signals before getting an M.
	// The signal handler may call needm itself, and we must avoid a
	// deadlock. Also, once g is installed, any incoming signals will
	// try to execute, but we won't have the sigaltstack settings and
	// other data set up appropriately until the end of minit, which
	// will unblock the signals.
	var sigmask sigset
	sigsave(&sigmask)
	sigblock(false)

	// Grab an extra M; last reports whether the list just became
	// empty (so we must eventually replenish it).
	mp, last := getExtraM()

	// Set needextram when we've just emptied the list, so that the
	// eventual callback code will allocate a new m for the extra
	// list once it is safe to do so.
	mp.needextram = last

	// Store the original signal mask for use by minit.
	mp.sigmask = sigmask

	// Install TLS on some platforms (previously setg would do this
	// if necessary).
	osSetupTLS(mp)

	// Install g (= m->g0) and set the stack bounds to match the
	// current (C-owned) stack.
	setg(mp.g0)
	sp := getcallersp()
	callbackUpdateSystemStack(mp, sp, signal)

	// Mark that we are now in Go. Otherwise a cgo c->go callback in a
	// signal handler arriving after we acquire the M could try to
	// create a new M.
	mp.isExtraInC = false

	// Initialize this thread to use the m.
	asminit()
	minit()

	// Emit a trace event for this dead -> syscall transition, but only
	// if we're not in a signal handler. Otherwise, the trace state is
	// picked up later.
	var trace traceLocker
	if !signal {
		trace = traceAcquire()
	}

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	sched.ngsys.Add(-1)

	if !signal {
		if trace.ok() {
			trace.GoCreateSyscall(mp.curg)
			traceRelease(trace)
		}
	}
	mp.isExtraInSig = signal
}
2285
2286
2287
2288
// needAndBindM is a wrapper for needm that also binds the m to the
// current C thread when the cgo pthread key has been created, so that
// later callbacks on this thread can reuse the same m (see cgoBindM).
func needAndBindM() {
	needm(false)

	if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
		cgoBindM()
	}
}
2296
2297
2298
2299
2300 func newextram() {
2301 c := extraMWaiters.Swap(0)
2302 if c > 0 {
2303 for i := uint32(0); i < c; i++ {
2304 oneNewExtraM()
2305 }
2306 } else if extraMLength.Load() == 0 {
2307
2308 oneNewExtraM()
2309 }
2310 }
2311
2312
// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil, -1)
	gp := malg(4096)
	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.isextra = true
	// Mark we are in C by default.
	mp.isExtraInC = true
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = sched.goidgen.Add(1)
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.OneNewExtraM(gp)
		traceRelease(trace)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	sched.ngsys.Add(1)

	// Add m to the extra list.
	addExtraM(mp)
}
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
// dropm puts the current m back onto the extra list when a cgo
// callback is done executing Go code and the thread is returning to C.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Emit a trace event for this syscall -> dead transition, but
	// only if we weren't in a signal handler; otherwise the trace
	// state is handled elsewhere.
	var trace traceLocker
	if !mp.isExtraInSig {
		trace = traceAcquire()
	}

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	mp.curg.preemptStop = false
	sched.ngsys.Add(1)

	if !mp.isExtraInSig {
		if trace.ok() {
			trace.GoDestroySyscall()
			traceRelease(trace)
		}
	}

	// Trash syscalltick so that it doesn't line up with the old value
	// anymore. The m might get reused with a reference to an old P and
	// the same syscalltick; trashing it ensures the next user of this m
	// appears to have lost the P to the syscall, keeping the trace
	// parser's view consistent.
	mp.syscalltick--

	// Reset trace state unconditionally. This goroutine is being
	// 'destroyed' from the perspective of the tracer.
	mp.curg.trace.reset()

	// Flush all the M's buffers. This is necessary because the M might
	// be used on a different thread with a different procid, so we have
	// to make sure we don't write into the same buffer.
	if traceEnabled() || traceShuttingDown() {
		// Acquire sched.lock across thread destruction: the tracer
		// requires that a thread cannot disappear from its view
		// (allm or freem) without it noticing, so sched.lock must be
		// held over traceThreadDestroy. The critical section is
		// short and dropm is rare, so just take the lock.
		lock(&sched.lock)
		traceThreadDestroy(mp)
		unlock(&sched.lock)
	}
	mp.isExtraInSig = false

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock(false)
	unminit()

	setg(nil)

	// Clear g0 stack bounds to ensure that needm always refreshes the
	// bounds when reusing this M.
	g0 := mp.g0
	g0.stack.hi = 0
	g0.stack.lo = 0
	g0.stackguard0 = 0
	g0.stackguard1 = 0

	putExtraM(mp)

	msigrestore(sigmask)
}
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503 func cgoBindM() {
2504 if GOOS == "windows" || GOOS == "plan9" {
2505 fatal("bindm in unexpected GOOS")
2506 }
2507 g := getg()
2508 if g.m.g0 != g {
2509 fatal("the current g is not g0")
2510 }
2511 if _cgo_bindm != nil {
2512 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2513 }
2514 }
2515
2516
2517 func getm() uintptr {
2518 return uintptr(unsafe.Pointer(getg().m))
2519 }
2520
var (
	// Locking linked list of extra M's, linked through mp.schedlink.
	// Must only be accessed via lockextra/unlockextra; the value 1 is
	// used as a "locked" sentinel, which is why this is a raw Uintptr
	// rather than a typed pointer.
	extraM atomic.Uintptr
	// Number of M's in the extraM list.
	extraMLength atomic.Uint32
	// Number of waiters in lockextra, cleared by newextram.
	extraMWaiters atomic.Uint32

	// Number of extra M's currently in use by threads.
	extraMInUse atomic.Uint32
)
2537
2538
2539
2540
2541
2542
2543
2544
// lockextra locks the extra M list and returns the list head.
// The caller must unlock the list via unlockextra (which stores a new
// head). If nilokay is true, lockextra will return a nil list head if
// that's what it finds; otherwise it keeps waiting (recording itself
// as a waiter) until the head is non-nil.
//
// Runs without a g, so only the _no_g sleep/yield primitives are used.
func lockextra(nilokay bool) *m {
	// Sentinel stored in extraM while the list is held.
	const locked = 1

	incr := false
	for {
		old := extraM.Load()
		if old == locked {
			// Someone else holds the list; spin.
			osyield_no_g()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads waiting
				// for an M. This is cleared by newextram.
				// Only add once per lockextra call.
				extraMWaiters.Add(1)
				incr = true
			}
			usleep_no_g(1)
			continue
		}
		if extraM.CompareAndSwap(old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		osyield_no_g()
		continue
	}
}
2573
2574
// unlockextra unlocks the extra M list by storing a new list head mp,
// adjusting the recorded list length by delta.
func unlockextra(mp *m, delta int32) {
	extraMLength.Add(delta)
	extraM.Store(uintptr(unsafe.Pointer(mp)))
}
2579
2580
2581
2582
2583
2584
2585
2586
// getExtraM returns an M from the extra M list, spinning until one is
// available. last reports whether the list became empty as a result.
func getExtraM() (mp *m, last bool) {
	mp = lockextra(false)
	extraMInUse.Add(1)
	unlockextra(mp.schedlink.ptr(), -1)
	return mp, mp.schedlink.ptr() == nil
}
2593
2594
2595
2596
2597
// putExtraM returns an extra M obtained from getExtraM back to the
// list. Newly allocated M's should use addExtraM instead.
func putExtraM(mp *m) {
	extraMInUse.Add(-1)
	addExtraM(mp)
}
2602
2603
2604
2605
// addExtraM adds a newly allocated M to the extra M list.
func addExtraM(mp *m) {
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp, 1)
}
2611
var (
	// allocmLock is locked for read when creating new Ms in allocm and
	// their addition to allm. Thus acquiring this lock for write blocks
	// the creation of new Ms.
	allocmLock rwmutex

	// execLock serializes exec and clone to avoid bugs or unspecified
	// behaviour around exec'ing while creating/destroying threads. See
	// the rlock sites in newm1.
	execLock rwmutex
)
2623
2624
2625
// Messages written (via writeErrStr) by OS-specific thread-creation
// code when it cannot start a new thread or allocate its stack.
const (
	failthreadcreate  = "runtime: failed to create new OS thread\n"
	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
)
2630
2631
2632
2633
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
// start an OS thread (locked or C-started threads); the template
// thread drains the list instead.
var newmHandoff struct {
	lock mutex

	// newm points to a list of M structures that need new OS threads.
	// The list is linked through m.schedlink.
	newm muintptr

	// waiting indicates that wake needs to be notified when an m
	// is put on the list.
	waiting bool
	wake    note

	// haveTemplateThread indicates that the templateThread has
	// been started. This is not protected by lock. Use cas to set
	// to 1.
	haveTemplateThread uint32
}
2651
2652
2653
2654
2655
2656
2657
2658
// Create a new m. It will start off with a call to fn, or else the
// scheduler. id is an optional pre-allocated m ID; omit by passing -1.
func newm(fn func(), pp *p, id int64) {
	// allocm adds a new M to allm, but it does not start until created
	// by the OS in newm1 or the template thread.
	//
	// Disable preemption here until we start the thread to ensure that
	// newm is not preempted between allocm and starting the new thread,
	// so that anything added to allm is guaranteed to eventually start.
	acquirem()

	mp := allocm(pp, fn, id)
	mp.nextp.set(pp)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked M or a thread that may have been started
		// by C. The kernel state of this thread may be strange (the
		// user may have locked it for that purpose). We don't want to
		// clone that into another thread. Instead, ask a known-good
		// thread — the template thread — to create the thread for us.
		//
		// Disabled on Plan 9; hence the GOOS check above.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		// The M has not started yet, but the template thread does not
		// participate in STW, so it will always process queued Ms and
		// it is safe to releasem.
		releasem(getg().m)
		return
	}
	newm1(mp)
	releasem(getg().m)
}
2707
// newm1 creates the OS thread for mp, via _cgo_thread_start when
// running with cgo, or newosproc otherwise.
func newm1(mp *m) {
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		if asanenabled {
			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		// Hold execLock to prevent exec while cloning (see execLock).
		execLock.rlock()
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone during exec.
	newosproc(mp)
	execLock.runlock()
}
2732
2733
2734
2735
2736
// startTemplateThread starts the template thread if it is not already
// running. The calling thread must itself be in a known-good state.
func startTemplateThread() {
	if GOARCH == "wasm" { // no threads on wasm yet
		return
	}

	// Disable preemption to guarantee that the template thread will be
	// created before a park once haveTemplateThread is set.
	mp := acquirem()
	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
		releasem(mp)
		return
	}
	newm(templateThread, nil, -1)
	releasem(mp)
}
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
// templateThread is a thread in a known-good state that exists solely
// to start new threads (via newm1) in known-good states when the
// calling thread may not be in a good state — see the handoff path in
// newm. It loops forever, draining newmHandoff.newm and sleeping on
// newmHandoff.wake when the list is empty.
func templateThread() {
	// Register as a system M so checkdead doesn't count us as a
	// deadlockable worker.
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	for {
		lock(&newmHandoff.lock)
		for newmHandoff.newm != 0 {
			newm := newmHandoff.newm.ptr()
			newmHandoff.newm = 0
			unlock(&newmHandoff.lock)
			// Start every queued M outside the lock.
			for newm != nil {
				next := newm.schedlink.ptr()
				newm.schedlink = 0
				newm1(newm)
				newm = next
			}
			lock(&newmHandoff.lock)
		}
		newmHandoff.waiting = true
		noteclear(&newmHandoff.wake)
		unlock(&newmHandoff.lock)
		notesleep(&newmHandoff.wake)
	}
}
2791
2792
2793
// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	gp := getg()

	if gp.m.locks != 0 {
		throw("stopm holding locks")
	}
	if gp.m.p != 0 {
		throw("stopm holding p")
	}
	if gp.m.spinning {
		throw("stopm spinning")
	}

	lock(&sched.lock)
	mput(gp.m)
	unlock(&sched.lock)
	mPark()
	// Whoever woke us set nextp to the P we should run.
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
2814
// mspinning is used as the mstart function for Ms created by startm
// that must begin life spinning.
func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there is no idle P, does nothing.
// If spinning is set, the caller has incremented nmspinning and must
// provide a P; startm will set m.spinning in the newly started M.
//
// Callers passing a non-nil P must call from a non-preemptible context.
// See comment on acquirem below.
//
// lockheld indicates whether the caller already holds sched.lock. The
// lock may be temporarily dropped, but will be reacquired before
// returning.
func startm(pp *p, spinning, lockheld bool) {
	// Disable preemption.
	//
	// Every owned P must have an owner that will eventually stop it in
	// the event of a GC stop request. startm takes transient ownership
	// of a P (either from argument or pidleget below) and transfers
	// ownership to a started M, which will be responsible for
	// performing the stop.
	//
	// Preemption must be disabled during this transient ownership,
	// otherwise the P this is running on may enter GC stop while still
	// holding the transient P, leaving that P in limbo and deadlocking
	// the STW.
	//
	// Callers passing a non-nil P must already be in non-preemptible
	// context, otherwise such preemption could occur on function entry.
	// Callers passing a nil P may be preemptible, so we must disable
	// preemption before acquiring a P from pidleget below.
	mp := acquirem()
	if !lockheld {
		lock(&sched.lock)
	}
	if pp == nil {
		if spinning {
			// TODO(prattmic): All remaining calls to this function
			// with _p_ == nil could be cleaned up to find a P
			// before calling startm.
			throw("startm: P required for spinning=true")
		}
		pp, _ = pidleget(0)
		if pp == nil {
			if !lockheld {
				unlock(&sched.lock)
			}
			releasem(mp)
			return
		}
	}
	nmp := mget()
	if nmp == nil {
		// No M is available, we must drop sched.lock and call newm.
		// However, we already own a P to assign to the M.
		//
		// Once sched.lock is released, another G (e.g., in a syscall),
		// could find no idle P while checkdead finds a runnable G but
		// no running M's because this new M hasn't started yet, thus
		// throwing in an apparent deadlock.
		//
		// Avoid this situation by pre-allocating the ID for the new M,
		// thus marking it as 'running' before we drop sched.lock. This
		// new M will eventually run the scheduler to execute any
		// queued G's.
		id := mReserveID()
		unlock(&sched.lock)

		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning
			// in the new M.
			fn = mspinning
		}
		newm(fn, pp, id)

		if lockheld {
			lock(&sched.lock)
		}
		// Ownership transfer of pp committed by start in newm.
		// Preemption is now safe.
		releasem(mp)
		return
	}
	if !lockheld {
		unlock(&sched.lock)
	}
	if nmp.spinning {
		throw("startm: m is spinning")
	}
	if nmp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(pp) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	nmp.spinning = spinning
	nmp.nextp.set(pp)
	notewakeup(&nmp.park)
	// Ownership transfer of pp committed by wakeup. Preemption is now
	// safe.
	releasem(mp)
}
2928
2929
2930
2931
2932
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
func handoffp(pp *p) {
	// handoffp must start an M in any situation where
	// findRunnable would return a G to run on pp.

	// If it has local or global work, start an M straight away.
	if !runqempty(pp) || sched.runqsize != 0 {
		startm(pp, false, false)
		return
	}
	// If there's trace work to do, start it straight away.
	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
		startm(pp, false, false)
		return
	}
	// If it has GC work, start it straight away.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
		startm(pp, false, false)
		return
	}
	// No local work. Check that there are no spinning/idle Ms;
	// otherwise our help is not required.
	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
		sched.needspinning.Store(0)
		startm(pp, true, false)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting.Load() {
		// World stop requested: stop this P here.
		pp.status = _Pgcstop
		pp.gcStopTime = nanotime()
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
		sched.safePointFn(pp)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wake up another M to poll network.
	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
		unlock(&sched.lock)
		startm(pp, false, false)
		return
	}

	// The scheduler lock cannot be held when calling wakeNetPoller
	// below because wakeNetPoller may call wakep which may call startm.
	when := pp.timers.wakeTime()
	pidleput(pp, 0)
	unlock(&sched.lock)

	if when != 0 {
		wakeNetPoller(when)
	}
}
3000
3001
3002
3003
// Tries to add one more P to execute G's.
// Called when a G is made runnable.
func wakep() {
	// Be conservative about spinning threads, only start one if none
	// exist already.
	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
		return
	}

	// Disable preemption until ownership of pp transfers to the next M
	// in startm. Otherwise preemption here would leave pp stuck waiting
	// to enter _Pgcstop.
	//
	// See the preemption comment on acquirem in startm.
	mp := acquirem()

	var pp *p
	lock(&sched.lock)
	pp, _ = pidlegetSpinning(0)
	if pp == nil {
		// Nothing to run it on; undo the nmspinning increment.
		if sched.nmspinning.Add(-1) < 0 {
			throw("wakep: negative nmspinning")
		}
		unlock(&sched.lock)
		releasem(mp)
		return
	}
	// Since we always have a P, the race described in the "No M is
	// available" comment in startm doesn't apply during the small
	// window between the unlock here and lock in startm: a checkdead
	// in between will always see at least one running M (ours).
	unlock(&sched.lock)

	startm(pp, true, false)

	releasem(mp)
}
3039
3040
3041
// Stops execution of the current m that is locked to a g until the g
// is runnable again. Returns with acquired P.
func stoplockedm() {
	gp := getg()

	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
		throw("stoplockedm: inconsistent locking")
	}
	if gp.m.p != 0 {
		// Schedule another M to run this p.
		pp := releasep()
		handoffp(pp)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	mPark()
	status := readgstatus(gp.m.lockedg.ptr())
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
		dumpgstatus(gp.m.lockedg.ptr())
		throw("stoplockedm: not runnable")
	}
	acquirep(gp.m.nextp.ptr())
	gp.m.nextp = 0
}
3065
3066
3067
3068
3069
// Schedules the locked m to run the locked gp.
func startlockedm(gp *g) {
	mp := gp.lockedm.ptr()
	if mp == getg().m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// Directly hand off current P to the locked m, then stop.
	incidlelocked(-1)
	pp := releasep()
	mp.nextp.set(pp)
	notewakeup(&mp.park)
	stopm()
}
3085
3086
3087
// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	gp := getg()

	if !sched.gcwaiting.Load() {
		throw("gcstopm: not waiting for gc")
	}
	if gp.m.spinning {
		gp.m.spinning = false
		// OK to just drop nmspinning here; threads are unparked
		// again when the world restarts.
		if sched.nmspinning.Add(-1) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	pp := releasep()
	lock(&sched.lock)
	pp.status = _Pgcstop
	pp.gcStopTime = nanotime()
	sched.stopwait--
	if sched.stopwait == 0 {
		// We're the last P to stop; wake the stopper.
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
func execute(gp *g, inheritTime bool) {
	mp := getg().m

	if goroutineProfile.active {
		// Make sure that gp has had its stack written out to the
		// goroutine profile before we start running it.
		tryRecordGoroutineProfile(gp, osyield)
	}

	// Assign gp.m before entering _Grunning so running Gs have an M.
	mp.curg = gp
	gp.m = mp
	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + stackGuard
	if !inheritTime {
		mp.p.ptr().schedtick++
	}

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if mp.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	trace := traceAcquire()
	if trace.ok() {
		trace.GoStart()
		traceRelease(trace)
	}

	gogo(&gp.sched)
}
3159
3160
3161
3162
3163
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from local or global queue,
// poll network. tryWakeP indicates that the returned goroutine is not
// normal (GC worker, trace reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
	mp := getg().m

	// The conditions here and in handoffp must agree: if findRunnable
	// would return a G to run, handoffp must start an M.

top:
	pp := mp.p.ptr()
	if sched.gcwaiting.Load() {
		gcstopm()
		goto top
	}
	if pp.runSafePointFn != 0 {
		runSafePointFn()
	}

	// now and pollUntil are saved for work stealing later, which may
	// steal timers. It's important that between now and then, nothing
	// blocks, so these numbers remain mostly relevant.
	now, pollUntil, _ := pp.timers.check(0)

	// Try to schedule the trace reader.
	if traceEnabled() || traceShuttingDown() {
		gp := traceReader()
		if gp != nil {
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, true
		}
	}

	// Try to schedule a GC worker.
	if gcBlackenEnabled != 0 {
		gp, tnow := gcController.findRunnableGCWorker(pp, now)
		if gp != nil {
			return gp, false, true
		}
		now = tnow
	}

	// Check the global runnable queue once in a while to ensure
	// fairness. Otherwise two goroutines can completely occupy the
	// local runqueue by constantly respawning each other.
	if pp.schedtick%61 == 0 && sched.runqsize > 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 1)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Wake up the finalizer G if it's waiting and has been asked to run.
	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0, true)
		}
	}
	if *cgo_yield != nil {
		asmcgocall(*cgo_yield, nil)
	}

	// local runq
	if gp, inheritTime := runqget(pp); gp != nil {
		return gp, inheritTime, false
	}

	// global runq
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to
	// stealing. We can safely skip it if there are no waiters or a
	// thread is blocked in netpoll already. If there is any kind of
	// logical race with that blocked thread, this thread will do
	// blocking netpoll below anyway.
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() { // non-blocking
			gp := list.pop()
			injectglist(&list)
			netpollAdjustWaiters(delta)
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
	}

	// Spinning Ms: steal work from other Ps.
	//
	// Limit the number of spinning Ms to half the number of busy Ps.
	// This is necessary to prevent excessive CPU consumption when
	// GOMAXPROCS>>1 but the program parallelism is low.
	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
		if !mp.spinning {
			mp.becomeSpinning()
		}

		gp, inheritTime, tnow, w, newWork := stealWork(now)
		if gp != nil {
			// Successfully stole.
			return gp, inheritTime, false
		}
		if newWork {
			// There may be new timer or GC work; restart to
			// discover.
			goto top
		}

		now = tnow
		if w != 0 && (pollUntil == 0 || w < pollUntil) {
			// Earlier timer to wait for.
			pollUntil = w
		}
	}

	// We have nothing to do.
	//
	// If we're in the GC mark phase and have work to do, run
	// idle-time marking rather than give up the P.
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
		if node != nil {
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			gp := node.gp.ptr()

			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}
		gcController.removeIdleMarkWorker()
	}

	// beforeIdle is a platform hook (e.g. wasm event handling); it may
	// return a goroutine to run, or report that other work became ready.
	gp, otherReady := beforeIdle(now, pollUntil)
	if gp != nil {
		trace := traceAcquire()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(gp, 0)
			traceRelease(trace)
		}
		return gp, false, false
	}
	if otherReady {
		goto top
	}

	// Before we drop our P, make a snapshot of the allp slice,
	// which can change underfoot once we no longer block safe-points.
	// We don't need to snapshot the contents because everything up to
	// cap(allp) is immutable.
	allpSnapshot := allp
	// Also snapshot masks. Value changes are OK, but we can't allow
	// len to change out from under us.
	idlepMaskSnapshot := idlepMask
	timerpMaskSnapshot := timerpMask

	// Return P and block.
	lock(&sched.lock)
	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(pp, 0)
		unlock(&sched.lock)
		return gp, false, false
	}
	if !mp.spinning && sched.needspinning.Load() == 1 {
		// See "Delicate dance" comment below.
		mp.becomeSpinning()
		unlock(&sched.lock)
		goto top
	}
	if releasep() != pp {
		throw("findrunnable: wrong p")
	}
	now = pidleput(pp, now)
	unlock(&sched.lock)

	// Delicate dance: thread transitions from spinning to non-spinning
	// state, potentially concurrently with submission of new work. We
	// must drop nmspinning first and then recheck all work sources (the
	// code below), otherwise another thread could submit work after
	// we've checked all sources but before we drop nmspinning, and
	// nobody would unpark a thread to run it.
	wasSpinning := mp.spinning
	if mp.spinning {
		mp.spinning = false
		if sched.nmspinning.Add(-1) < 0 {
			throw("findrunnable: negative nmspinning")
		}

		// Recheck the global run queue while we can still grab a P.
		lock(&sched.lock)
		if sched.runqsize != 0 {
			pp, _ := pidlegetSpinning(0)
			if pp != nil {
				gp := globrunqget(pp, 0)
				if gp == nil {
					throw("global runq empty with non-zero runqsize")
				}
				unlock(&sched.lock)
				acquirep(pp)
				mp.becomeSpinning()
				return gp, false, false
			}
		}
		unlock(&sched.lock)

		// Recheck all local run queues using the snapshots.
		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()
			goto top
		}

		// Check for idle-priority GC work again.
		pp, gp := checkIdleGCNoP()
		if pp != nil {
			acquirep(pp)
			mp.becomeSpinning()

			// Run the idle worker.
			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 0)
				traceRelease(trace)
			}
			return gp, false, false
		}

		// Finally, check for timer creation or expiry concurrently
		// with transitioning from spinning to non-spinning. This only
		// computes the next wake time; we have no P so we cannot run
		// timers here.
		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
	}

	// Poll network until next timer.
	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
		sched.pollUntil.Store(pollUntil)
		if mp.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if mp.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		delay := int64(-1)
		if pollUntil != 0 {
			if now == 0 {
				now = nanotime()
			}
			delay = pollUntil - now
			if delay < 0 {
				delay = 0
			}
		}
		if faketime != 0 {
			// When using fake time, just poll.
			delay = 0
		}
		list, delta := netpoll(delay) // block until new work is available
		// Refresh now after the potentially blocking poll.
		now = nanotime()
		sched.pollUntil.Store(0)
		sched.lastpoll.Store(now)
		if faketime != 0 && list.empty() {
			// Using fake time and nothing is ready; stop M.
			// When all M's stop, checkdead will call timejump.
			stopm()
			goto top
		}
		lock(&sched.lock)
		pp, _ := pidleget(now)
		unlock(&sched.lock)
		if pp == nil {
			injectglist(&list)
			netpollAdjustWaiters(delta)
		} else {
			acquirep(pp)
			if !list.empty() {
				gp := list.pop()
				injectglist(&list)
				netpollAdjustWaiters(delta)
				trace := traceAcquire()
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.ok() {
					trace.GoUnpark(gp, 0)
					traceRelease(trace)
				}
				return gp, false, false
			}
			if wasSpinning {
				mp.becomeSpinning()
			}
			goto top
		}
	} else if pollUntil != 0 && netpollinited() {
		// Some other thread is polling; kick it if it would sleep
		// past our next timer.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
			netpollBreak()
		}
	}
	stopm()
	goto top
}
3540
3541
3542
3543
3544
// pollWork reports whether there is non-background work this P could
// be doing. This is a fairly lightweight check to be used for
// background work loops. It checks a subset of the conditions checked
// by the actual scheduler.
func pollWork() bool {
	if sched.runqsize != 0 {
		return true
	}
	p := getg().m.p.ptr()
	if !runqempty(p) {
		return true
	}
	if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
		if list, delta := netpoll(0); !list.empty() {
			injectglist(&list)
			netpollAdjustWaiters(delta)
			return true
		}
	}
	return false
}
3562
3563
3564
3565
3566
3567
3568
// stealWork attempts to steal a runnable goroutine or timer from any P.
//
// If newWork is true, new work may have been readied.
//
// If now is not 0 it is the current time. stealWork returns the passed
// time or the current time (rnow) if now was passed as 0.
func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
	pp := getg().m.p.ptr()

	ranTimer := false

	const stealTries = 4
	for i := 0; i < stealTries; i++ {
		// On the final pass, also try to steal timers and runnext.
		stealTimersOrRunNextG := i == stealTries-1

		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
			if sched.gcwaiting.Load() {
				// GC work may be available.
				return nil, false, now, pollUntil, true
			}
			p2 := allp[enum.position()]
			if pp == p2 {
				continue
			}

			// Steal timers from p2. This call to check is the only
			// place where we might hold a lock on a different P's
			// timers. We do this once on the last pass before
			// checking runnext because stealing from the other P's
			// runnext should be the last resort, so if there are
			// timers to steal do that first.
			//
			// We only check timers on one of the stealing
			// iterations because now doesn't change in this loop,
			// and checking each P's timers more than once with the
			// same now is probably a waste of time.
			//
			// timerpMask tells us whether the P may have timers at
			// all. If it can't, no need to check at all.
			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
				tnow, w, ran := p2.timers.check(now)
				now = tnow
				if w != 0 && (pollUntil == 0 || w < pollUntil) {
					pollUntil = w
				}
				if ran {
					// Running the timers may have made an
					// arbitrary number of G's ready and added
					// them to this P's local run queue. That
					// invalidates the assumption of runqsteal
					// that it always has room to add stolen
					// G's. So check now if there is a local G
					// to run.
					if gp, inheritTime := runqget(pp); gp != nil {
						return gp, inheritTime, now, pollUntil, ranTimer
					}
					ranTimer = true
				}
			}

			// Don't bother to attempt to steal if p2 is idle.
			if !idlepMask.read(enum.position()) {
				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
					return gp, false, now, pollUntil, ranTimer
				}
			}
		}
	}

	// No goroutines found to steal. Regardless, running a timer may
	// have made some goroutine ready that we missed. Indicate the next
	// timer to wait for.
	return nil, false, now, pollUntil, ranTimer
}
3637
3638
3639
3640
3641
3642
// checkRunqsNoP checks all Ps (via the snapshots) for a runnable G to
// steal.
//
// On entry we have no P. If a G is available to steal and a P is
// available, the P is returned which the caller should acquire and
// attempt to steal the work to.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
	for id, p2 := range allpSnapshot {
		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
			lock(&sched.lock)
			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				// Can't get a P, don't bother checking
				// remaining Ps.
				unlock(&sched.lock)
				return nil
			}
			unlock(&sched.lock)
			return pp
		}
	}

	// No work available.
	return nil
}
3661
3662
3663
3664
3665 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3666 for id, p2 := range allpSnapshot {
3667 if timerpMaskSnapshot.read(uint32(id)) {
3668 w := p2.timers.wakeTime()
3669 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3670 pollUntil = w
3671 }
3672 }
3673 }
3674
3675 return pollUntil
3676 }
3677
3678
3679
3680
3681
// checkIdleGCNoP checks for idle-priority GC work, without a P on
// entry.
//
// If GC work, a P, and a worker G are all available, the P and G are
// returned. The returned P has not been wired yet.
func checkIdleGCNoP() (*p, *g) {
	// N.B. Since we have no P, gcBlackenEnabled may change at any time;
	// we must check again after acquiring a P. As an optimization, we
	// also check if an idle mark worker is needed at all, which is OK
	// here because if we observe that one isn't needed, at least one is
	// currently running, and its own trip through the scheduler will
	// reschedule it if need be.
	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
		return nil, nil
	}
	if !gcMarkWorkAvailable(nil) {
		return nil, nil
	}

	// Work is available; we can start an idle GC worker only if there
	// is an available P and available worker G.
	//
	// Synchronization: we must hold sched.lock until we are committed
	// to keeping the P. Otherwise we cannot put the unnecessary P back
	// in sched.pidle without performing the full set of idle transition
	// checks.
	lock(&sched.lock)
	pp, now := pidlegetSpinning(0)
	if pp == nil {
		unlock(&sched.lock)
		return nil, nil
	}

	// Now that we own a P, gcBlackenEnabled can't change (it requires
	// a STW to change).
	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
		pidleput(pp, now)
		unlock(&sched.lock)
		return nil, nil
	}

	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// No worker G available; undo everything.
		pidleput(pp, now)
		unlock(&sched.lock)
		gcController.removeIdleMarkWorker()
		return nil, nil
	}

	unlock(&sched.lock)

	return pp, node.gp.ptr()
}
3739
3740
3741
3742
// wakeNetPoller wakes up the thread sleeping in the network poller if
// it isn't going to wake up before the when argument; or it wakes an
// idle P to service timers and the network poller if there isn't one
// already.
func wakeNetPoller(when int64) {
	if sched.lastpoll.Load() == 0 {
		// In findRunnable we ensure that when polling, the pollUntil
		// field is either zero or the time to which the current poll
		// is expected to run. This can have a spurious wakeup but
		// should never miss a wakeup.
		pollerPollUntil := sched.pollUntil.Load()
		if pollerPollUntil == 0 || pollerPollUntil > when {
			netpollBreak()
		}
	} else {
		// There are no threads in the network poller; try to get one
		// there so it can handle new timers.
		if GOOS != "plan9" { // Temporary workaround on plan9.
			wakep()
		}
	}
}
3761
// resetspinning clears the current M's spinning state. Since work may
// have been readied concurrently while we were spinning, it also tries
// to wake another P.
func resetspinning() {
	gp := getg()
	if !gp.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	gp.m.spinning = false
	nmspinning := sched.nmspinning.Add(-1)
	if nmspinning < 0 {
		throw("findrunnable: negative nmspinning")
	}
	// M wakeup policy is deliberately somewhat conservative, so check
	// if we need to wake up another P here.
	wakep()
}
3777
3778
3779
3780
3781
3782
3783
3784
3785
// injectglist adds each runnable G on the list to some run queue,
// and clears glist. If there is no current P, they are added to the
// global queue, and up to qsize M's are started to run them.
// Otherwise, for each idle P, this adds a G to the global queue
// and starts an M. Any remaining G's are added to the current P's
// local run queue.
// This may temporarily acquire sched.lock.
func injectglist(glist *gList) {
	if glist.empty() {
		return
	}
	trace := traceAcquire()
	if trace.ok() {
		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
			trace.GoUnpark(gp, 0)
		}
		traceRelease(trace)
	}

	// Mark all the goroutines as runnable before we put them
	// on the run queues.
	head := glist.head.ptr()
	var tail *g
	qsize := 0
	for gp := head; gp != nil; gp = gp.schedlink.ptr() {
		tail = gp
		qsize++
		casgstatus(gp, _Gwaiting, _Grunnable)
	}

	// Turn the gList into a gQueue.
	var q gQueue
	q.head.set(head)
	q.tail.set(tail)
	*glist = gList{}

	// startIdle starts up to n Ms on idle Ps to run injected work.
	startIdle := func(n int) {
		for i := 0; i < n; i++ {
			mp := acquirem() // See the preemption comment in startm.
			lock(&sched.lock)

			pp, _ := pidlegetSpinning(0)
			if pp == nil {
				unlock(&sched.lock)
				releasem(mp)
				break
			}

			startm(pp, false, true)
			unlock(&sched.lock)
			releasem(mp)
		}
	}

	pp := getg().m.p.ptr()
	if pp == nil {
		// No P: everything goes to the global queue.
		lock(&sched.lock)
		globrunqputbatch(&q, int32(qsize))
		unlock(&sched.lock)
		startIdle(qsize)
		return
	}

	// Give one G to the global queue per idle P (and start an M for
	// each), keeping the rest for our local run queue.
	npidle := int(sched.npidle.Load())
	var (
		globq gQueue
		n     int
	)
	for n = 0; n < npidle && !q.empty(); n++ {
		g := q.pop()
		globq.pushBack(g)
	}
	if n > 0 {
		lock(&sched.lock)
		globrunqputbatch(&globq, int32(n))
		unlock(&sched.lock)
		startIdle(n)
		qsize -= n
	}

	if !q.empty() {
		runqputbatch(pp, &q, qsize)
	}

	// Some P's might have become idle after we loaded sched.npidle but
	// before the goroutines were added to the queues, which could lead
	// to idle P's while there is work available. Calling wakep() here
	// hedges against that: it does nothing in the common case, and in
	// the racy case wakes one more P, which may wake another, and so on
	// until the work is drained.
	wakep()
}
3878
3879
3880
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
	mp := getg().m

	if mp.locks != 0 {
		throw("schedule: holding locks")
	}

	if mp.lockedg != 0 {
		stoplockedm()
		execute(mp.lockedg.ptr(), false) // Never returns.
	}

	// We should not schedule away from a g that is executing a cgo
	// call, since the cgo call is using the m's g0 stack.
	if mp.incgo {
		throw("schedule: in cgo")
	}

top:
	pp := mp.p.ptr()
	pp.preempt = false

	// Safety check: if we are spinning, the run queue should be empty.
	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
		throw("schedule: spinning with local work")
	}

	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available

	if debug.dontfreezetheworld > 0 && freezing.Load() {
		// The world is frozen but we chose not to gcstopm in
		// findRunnable (dontfreezetheworld); don't run new
		// goroutines either. Deadlock here, rather than in the
		// findRunnable loop, so freezetheworld sees this M with a P.
		lock(&deadlock)
		lock(&deadlock)
	}

	// This thread is going to run a goroutine and is not spinning
	// anymore, so if it was marked as spinning we need to reset it now
	// and potentially start a new spinning M.
	if mp.spinning {
		resetspinning()
	}

	if sched.disable.user && !schedEnabled(gp) {
		// Scheduling of this goroutine is disabled. Put it on the
		// list of pending runnable goroutines for when user
		// scheduling is re-enabled, and look again.
		lock(&sched.lock)
		if schedEnabled(gp) {
			// Something re-enabled scheduling while we were
			// acquiring the lock.
			unlock(&sched.lock)
		} else {
			sched.disable.runnable.pushBack(gp)
			sched.disable.n++
			unlock(&sched.lock)
			goto top
		}
	}

	// If about to schedule a not-normal goroutine (a GC worker or
	// trace reader), wake a P if there is one.
	if tryWakeP {
		wakep()
	}
	if gp.lockedm != 0 {
		// Hands off own p to the locked m, then blocks waiting for
		// a new p.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}
3962
3963
3964
3965
3966
3967
3968
3969
// dropg removes the association between m and the current goroutine
// m->curg. Typically a caller sets the g's status away from Grunning
// and then immediately calls dropg to finish the job. The caller is
// responsible for arranging for the g to be restarted at an
// appropriate time, after which this m should call schedule.
func dropg() {
	gp := getg()

	// Use the no-write-barrier setters: curg is being detached.
	setMNoWB(&gp.m.curg.m, nil)
	setGNoWB(&gp.m.curg, nil)
}
3976
// parkunlock_c is a waitunlockf callback: it releases the runtime mutex
// the goroutine held when it parked and always lets the park proceed.
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}
3981
3982
// park_m is the gopark continuation, run on g0. gp is the goroutine
// being parked: move it to _Gwaiting, drop it from this M, run the
// optional waitunlockf (which may veto the park), and reschedule.
func park_m(gp *g) {
	mp := getg().m

	trace := traceAcquire()

	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
	}
	// The wait reason was set by park's caller before the mcall.
	casgstatus(gp, _Grunning, _Gwaiting)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()

	if fn := mp.waitunlockf; fn != nil {
		ok := fn(gp, mp.waitlock)
		mp.waitunlockf = nil
		mp.waitlock = nil
		if !ok {
			// The unlock function vetoed the park: make gp
			// runnable again and resume it immediately.
			trace := traceAcquire()
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.ok() {
				trace.GoUnpark(gp, 2)
				traceRelease(trace)
			}
			execute(gp, true) // Schedule it back; never returns.
		}
	}
	schedule()
}
4019
// goschedImpl yields the processor: move gp from _Grunning to
// _Grunnable, put it on the global run queue, and call schedule to pick
// something else. preempted only selects the trace event emitted
// (GoPreempt vs GoSched).
func goschedImpl(gp *g, preempted bool) {
	trace := traceAcquire()
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		if preempted {
			trace.GoPreempt()
		} else {
			trace.GoSched()
		}
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}

	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	if mainStarted {
		wakep()
	}

	schedule()
}
4053
4054
// gosched_m is the Gosched continuation, run on g0.
func gosched_m(gp *g) {
	goschedImpl(gp, false)
}
4058
4059
// goschedguarded_m is a Gosched continuation that bails out (resumes gp
// immediately) when the M is not in a preemptible state.
func goschedguarded_m(gp *g) {
	if !canPreemptM(gp.m) {
		gogo(&gp.sched) // never return
	}
	goschedImpl(gp, false)
}
4066
// gopreempt_m is the continuation for a cooperative preemption request.
func gopreempt_m(gp *g) {
	goschedImpl(gp, true)
}
4070
4071
4072
4073
// preemptPark parks gp and puts it in _Gpreempted.
//
// Called during an asynchronous preemption, after gp's context has been
// saved; gp can later be resumed by whoever completes the preemption.
func preemptPark(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}

	if gp.asyncSafePoint {
		// Double-check that async preemption does not
		// happen in SPWRITE assembly functions.
		// isAsyncSafePoint must exclude this case.
		f := findfunc(gp.sched.pc)
		if !f.valid() {
			throw("preempt at unknown pc")
		}
		if f.flag&abi.FuncFlagSPWrite != 0 {
			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
			throw("preempt SPWRITE")
		}
	}

	// Transition from _Grunning to _Gscan|_Gpreempted. We can't
	// be in _Grunning when we dropg because then we'd be running
	// without an M, but the moment we're in _Gpreempted,
	// something could claim this G before we've fully cleaned it
	// up. Hence, we set the scan bit to lock down further
	// transitions until we can dropg.
	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
	dropg()

	// Be careful about how we trace this next event. The ordering
	// is subtle.
	//
	// The moment we CAS into _Gpreempted, suspendG could CAS to
	// _Gwaiting, do its work, and ready the goroutine — all before
	// we even get the chance to emit an event. The events would
	// then appear out of order, and the tracer assumes the
	// scheduler orders GoPark before GoUnpark.
	//
	// The answer is to emit the event while we still hold the
	// _Gscan bit on the goroutine. We still need traceAcquire and
	// traceRelease across the CAS because the tracer could be
	// what's calling suspendG in the first place, and we want the
	// CAS and event emission to appear atomic to the tracer.
	trace := traceAcquire()
	if trace.ok() {
		trace.GoPark(traceBlockPreempted, 0)
	}
	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
	if trace.ok() {
		traceRelease(trace)
	}
	schedule()
}
4129
4130
4131
4132
// goyield is like Gosched, but it:
//   - emits a GoPreempt trace event instead of a GoSched event
//   - puts the current G on the runq of the current P instead of the
//     global queue
func goyield() {
	checkTimeouts()
	mcall(goyield_m)
}
4137
// goyield_m is the goyield continuation on g0: requeue gp on the
// current P's local run queue (not the global one, unlike gosched_m)
// and reschedule.
func goyield_m(gp *g) {
	trace := traceAcquire()
	pp := gp.m.p.ptr()
	if trace.ok() {
		// Trace the event before the transition. It may take a
		// stack trace, but we won't own the stack after the
		// transition anymore.
		trace.GoPreempt()
	}
	casgstatus(gp, _Grunning, _Grunnable)
	if trace.ok() {
		traceRelease(trace)
	}
	dropg()
	runqput(pp, gp, false)
	schedule()
}
4155
4156
// goexit1 finishes execution of the current goroutine: emit the GoEnd
// trace event, then switch to g0 to tear the goroutine down.
func goexit1() {
	if raceenabled {
		racegoend()
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.GoEnd()
		traceRelease(trace)
	}
	mcall(goexit0)
}
4168
4169
// goexit0 is the goexit continuation on g0: destroy gp and run the
// scheduler to pick the next goroutine.
func goexit0(gp *g) {
	gdestroy(gp)
	schedule()
}
4174
// gdestroy tears down a goroutine that has finished running: move it to
// _Gdead, clear its scheduler-visible state, return its GC assist
// credit to the background pool, and put it on the free list for reuse.
// If the goroutine had locked its thread, the thread is terminated
// (except on wasm and plan9; see below).
func gdestroy(gp *g) {
	mp := getg().m
	pp := mp.p.ptr()

	casgstatus(gp, _Grunning, _Gdead)
	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
	if isSystemGoroutine(gp, false) {
		sched.ngsys.Add(-1)
	}
	// Clear all state that could keep objects alive or confuse a
	// later reuse of this g from the free list.
	gp.m = nil
	locked := gp.lockedm != 0
	gp.lockedm = 0
	mp.lockedg = 0
	gp.preemptStop = false
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = waitReasonZero
	gp.param = nil
	gp.labels = nil
	gp.timer = nil

	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
		// Flush assist credit to the global pool. This gives
		// better information to pacing if the application is
		// rapidly creating and exiting goroutines.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
		gcController.bgScanCredit.Add(scanCredit)
		gp.gcAssistBytes = 0
	}

	dropg()

	if GOARCH == "wasm" { // no threads yet on wasm
		gfput(pp, gp)
		return
	}

	if mp.lockedInt != 0 {
		print("invalid m->lockedInt = ", mp.lockedInt, "\n")
		throw("internal lockOSThread error")
	}
	gfput(pp, gp)
	if locked {
		// The goroutine may have locked this thread because
		// it put it in an unusual kernel state. Kill it
		// rather than returning it to the thread pool.

		// Return to mstart, which will release the P and exit
		// the thread.
		if GOOS != "plan9" { // See golang.org/issue/22227.
			gogo(&mp.g0.sched)
		} else {
			// Clear lockedExt on plan9 since we may end up re-using
			// this thread.
			mp.lockedExt = 0
		}
	}
}
4236
4237
4238
4239
4240
4241
4242
4243
4244
// save updates gp.sched to refer to pc, sp, and bp so that a following
// gogo will restore that context. It is only for user goroutines: g0
// and gsignal contexts are saved/restored by lower-level mechanisms
// (gogo, mcall, systemstack), so calling save on them is a logic error.
func save(pc, sp, bp uintptr) {
	gp := getg()

	if gp == gp.m.g0 || gp == gp.m.gsignal {
		// m.g0.sched is special and must describe the
		// initial state of m.g0. Any other g's sched is
		// handled by lower-level mechanisms, so a save here
		// indicates a bug in the caller.
		throw("save on system g not allowed")
	}

	gp.sched.pc = pc
	gp.sched.sp = sp
	gp.sched.lr = 0
	gp.sched.ret = 0
	gp.sched.bp = bp
	// We need to ensure ctxt is zero, but writing to it here could
	// require a write barrier. It should always already be zero;
	// assert that.
	if gp.sched.ctxt != nil {
		badctxt()
	}
}
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
// reentersyscall is the implementation of entersyscall: it records that
// the current goroutine is entering a system call (status _Gsyscall),
// saves its context so the stack can be scanned and traced, and moves
// its P into _Psyscall so sysmon or the GC can retake it if the call
// blocks.
//
// pc/sp/bp are the caller's registers. They must be re-saved after any
// call that might switch to the system stack (systemstack clobbers
// g.sched), hence the repeated save calls below.
func reentersyscall(pc, sp, bp uintptr) {
	trace := traceAcquire()
	gp := getg()

	// Disable preemption because during this function g is in _Gsyscall
	// status but can have an inconsistent g->sched; do not let the GC
	// observe it.
	gp.m.locks++

	// Entersyscall must not call any function that might split/grow the
	// stack. Catch calls that might, by replacing the stack guard with
	// something that will trip any stack check, and set throwsplit so
	// newstack dies if it is reached anyway.
	gp.stackguard0 = stackPreempt
	gp.throwsplit = true

	// Leave SP around for GC and traceback.
	save(pc, sp, bp)
	gp.syscallsp = sp
	gp.syscallpc = pc
	gp.syscallbp = bp
	casgstatus(gp, _Grunning, _Gsyscall)
	if staticLockRanking {
		// When doing static lock ranking, casgstatus can call
		// systemstack which clobbers g.sched.
		save(pc, sp, bp)
	}
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	if trace.ok() {
		systemstack(func() {
			trace.GoSysCall()
			traceRelease(trace)
		})
		// systemstack itself clobbers g.sched.{pc,sp} and we might
		// need them later when the G is genuinely blocked in a
		// syscall.
		save(pc, sp, bp)
	}

	if sched.sysmonwait.Load() {
		systemstack(entersyscall_sysmon)
		save(pc, sp, bp)
	}

	if gp.m.p.ptr().runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack
		systemstack(runSafePointFn)
		save(pc, sp, bp)
	}

	// Detach the P: leave it in _Psyscall, remembered in m.oldp so
	// exitsyscall can try to reattach it cheaply.
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	pp := gp.m.p.ptr()
	pp.m = 0
	gp.m.oldp.set(pp)
	gp.m.p = 0
	atomic.Store(&pp.status, _Psyscall)
	if sched.gcwaiting.Load() {
		systemstack(entersyscall_gcwait)
		save(pc, sp, bp)
	}

	gp.m.locks--
}
4362
4363
4364
4365
4366
4367
4368
// entersyscall marks the transition from Go code into a system call
// that is expected to return promptly. The caller's pc/sp/fp are
// captured here and handed to reentersyscall, which saves them.
func entersyscall() {
	// N.B. getcallerfp cannot be written directly as an argument in the
	// call to reentersyscall because it forces spilling the other
	// arguments to the stack. This results in exceeding the nosplit
	// stack requirements on some platforms.
	fp := getcallerfp()
	reentersyscall(getcallerpc(), getcallersp(), fp)
}
4377
// entersyscall_sysmon wakes sysmon if it is sleeping so it can retake
// Ps from long-running syscalls. Runs on the system stack.
func entersyscall_sysmon() {
	lock(&sched.lock)
	if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}
4386
// entersyscall_gcwait hands the P this M just left in _Psyscall
// directly to a pending stop-the-world, so the GC does not have to wait
// for sysmon to retake it. Runs on the system stack.
func entersyscall_gcwait() {
	gp := getg()
	pp := gp.m.oldp.ptr()

	lock(&sched.lock)
	trace := traceAcquire()
	if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
		if trace.ok() {
			// This is a steal in the new tracer. While it's very likely
			// that we were the ones to put this P into _Psyscall, between
			// then and now it's totally possible it had been stolen and
			// then put back into _Psyscall for us to acquire here. In such
			// case ProcStop would be incorrect.
			//
			// TODO(mknyszek): Consider emitting a ProcStop instead when
			// gp.m.syscalltick == pp.syscalltick, since then we know we never
			// lost the P.
			trace.ProcSteal(pp, true)
			traceRelease(trace)
		}
		pp.gcStopTime = nanotime()
		pp.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			// We were the last P the stop was waiting for.
			notewakeup(&sched.stopnote)
		}
	} else if trace.ok() {
		traceRelease(trace)
	}
	unlock(&sched.lock)
}
4417
4418
4419
4420
// entersyscallblock is like entersyscall, but for a system call that is
// known to block for a long time: the P is handed off to another M
// immediately rather than being left in _Psyscall for sysmon to retake.
func entersyscallblock() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	gp.throwsplit = true
	gp.stackguard0 = stackPreempt // see comment in entersyscall
	gp.m.syscalltick = gp.m.p.ptr().syscalltick
	gp.m.p.ptr().syscalltick++

	// Leave SP around for GC and traceback.
	pc := getcallerpc()
	sp := getcallersp()
	bp := getcallerfp()
	save(pc, sp, bp)
	gp.syscallsp = gp.sched.sp
	gp.syscallpc = gp.sched.pc
	gp.syscallbp = gp.sched.bp
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		// Copy into locals so the closure does not capture gp's
		// fields at a point where they might be changing.
		sp1 := sp
		sp2 := gp.sched.sp
		sp3 := gp.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	casgstatus(gp, _Grunning, _Gsyscall)
	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	systemstack(entersyscallblock_handoff)

	// Resave for traceback during blocked call (systemstack clobbered
	// g.sched).
	save(getcallerpc(), getcallersp(), getcallerfp())

	gp.m.locks--
}
4462
// entersyscallblock_handoff releases this M's P and hands it off to
// another M, emitting the syscall trace event first. Runs on the
// system stack.
func entersyscallblock_handoff() {
	trace := traceAcquire()
	if trace.ok() {
		trace.GoSysCall()
		traceRelease(trace)
	}
	handoffp(releasep())
}
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
// exitsyscall marks the transition from a system call back to Go code.
//
// The fast path (exitsyscallfast) tries to reattach the P we left in
// _Psyscall, or grab an idle one, without sched.lock. If that fails we
// switch to g0 (exitsyscall0), queue the goroutine, and block this M.
//
// Must be called from the same stack frame depth as the matching
// entersyscall (checked against syscallsp below).
func exitsyscall() {
	gp := getg()

	gp.m.locks++ // see comment in entersyscall
	if getcallersp() > gp.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}

	gp.waitsince = 0
	oldp := gp.m.oldp.ptr()
	gp.m.oldp = 0
	if exitsyscallfast(oldp) {
		// When exitsyscallfast returns success, we have a P so can now use
		// write barriers.
		if goroutineProfile.active {
			// Make sure that gp has had its stack written out to the goroutine
			// profile, exactly as it was when the goroutine profiler first
			// stopped the world.
			systemstack(func() {
				tryRecordGoroutineProfileWB(gp)
			})
		}
		trace := traceAcquire()
		if trace.ok() {
			lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
			systemstack(func() {
				// Write out syscall exit eagerly.
				//
				// It's important that we write this *after* we know whether
				// we lost our P or not (determined by exitsyscallfast).
				trace.GoSysExit(lostP)
				if lostP {
					// We lost the P at some point, even though we got it back
					// here. Trace that we're starting again, because we were
					// effectively blocked and are about to start running.
					trace.GoStart()
				}
			})
		}
		// There's a cpu for us, so we can run.
		gp.m.p.ptr().syscalltick++
		casgstatus(gp, _Gsyscall, _Grunning)
		if trace.ok() {
			traceRelease(trace)
		}

		// Garbage collector isn't running (since we are),
		// so okay to clear syscallsp.
		gp.syscallsp = 0
		gp.m.locks--
		if gp.preempt {
			// restore the preemption request in case we've cleared it in newstack
			gp.stackguard0 = stackPreempt
		} else {
			// otherwise restore the real stack guard, we've spoiled it in
			// entersyscall/entersyscallblock
			gp.stackguard0 = gp.stack.lo + stackGuard
		}
		gp.throwsplit = false

		if sched.disable.user && !schedEnabled(gp) {
			// Scheduling of this goroutine is disabled.
			Gosched()
		}

		return
	}

	gp.m.locks--

	// Call the scheduler on g0; does not return until this g runs again.
	mcall(exitsyscall0)

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	gp.syscallsp = 0
	gp.m.p.ptr().syscalltick++
	gp.throwsplit = false
}
4568
4569
// exitsyscallfast tries to reattach a P without taking sched.lock:
// first the P left in _Psyscall (oldp), then any idle P. Reports
// whether a P was acquired.
func exitsyscallfast(oldp *p) bool {
	// Freezetheworld sets stopwait but does not retake P's.
	if sched.stopwait == freezeStopWait {
		return false
	}

	// Try to re-acquire the last P.
	trace := traceAcquire()
	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
		// There's a cpu for us, so we can run.
		wirep(oldp)
		exitsyscallfast_reacquired(trace)
		if trace.ok() {
			traceRelease(trace)
		}
		return true
	}
	if trace.ok() {
		traceRelease(trace)
	}

	// Try to get any other idle P.
	if sched.pidle != 0 {
		var ok bool
		systemstack(func() {
			ok = exitsyscallfast_pidle()
		})
		if ok {
			return true
		}
	}
	return false
}
4603
4604
4605
4606
4607
4608
// exitsyscallfast_reacquired is the exitsyscall path on which this G
// has successfully reacquired the P it was running on before the
// syscall. If syscalltick changed, the P was retaken and given back in
// the meantime, so account for the steal in the trace.
func exitsyscallfast_reacquired(trace traceLocker) {
	gp := getg()
	if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
		if trace.ok() {
			// The p was retaken and then put back into _Psyscall
			// (gp.m.syscalltick has changed); here we effectively
			// steal it back from the syscall that was running on it.
			systemstack(func() {
				// We're stealing the P. It's treated as if it
				// temporarily stopped running. Then, start running.
				trace.ProcSteal(gp.m.p.ptr(), true)
				trace.ProcStart()
			})
		}
		gp.m.p.ptr().syscalltick++
	}
}
4626
// exitsyscallfast_pidle grabs an idle P (waking sysmon if it was
// sleeping) and wires it to this M. Runs on the system stack. Reports
// whether a P was acquired.
func exitsyscallfast_pidle() bool {
	lock(&sched.lock)
	pp, _ := pidleget(0)
	if pp != nil && sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		return true
	}
	return false
}
4641
4642
4643
4644
4645
4646
4647
// exitsyscall0 is the slow path of exitsyscall, run on g0 when a P
// could not be reattached cheaply: make gp runnable, then either run it
// on a freshly acquired idle P, wait for gp's locked M to take it, or
// queue it globally and park this M.
func exitsyscall0(gp *g) {
	var trace traceLocker
	traceExitingSyscall()
	trace = traceAcquire()
	casgstatus(gp, _Gsyscall, _Grunnable)
	traceExitedSyscall()
	if trace.ok() {
		// Write out syscall exit eagerly.
		//
		// It's important that we write this *after* we know whether we
		// lost our P or not (determined by exitsyscallfast).
		trace.GoSysExit(true)
		traceRelease(trace)
	}
	dropg()
	lock(&sched.lock)
	var pp *p
	if schedEnabled(gp) {
		pp, _ = pidleget(0)
	}
	var locked bool
	if pp == nil {
		globrunqput(gp)

		// Below, we stoplockedm if gp is locked. globrunqput releases
		// ownership of gp, so we must check if gp is locked prior to
		// committing the release by unlocking sched.lock, otherwise we
		// could race with another M transitioning gp from unlocked to
		// locked.
		locked = gp.lockedm != 0
	} else if sched.sysmonwait.Load() {
		sched.sysmonwait.Store(false)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if pp != nil {
		acquirep(pp)
		execute(gp, false) // Never returns.
	}
	if locked {
		// Wait until another thread schedules gp and so m again.
		//
		// N.B. lockedm must be this M, as this g was running on this M
		// before entersyscall.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}
4698
4699
4700
4701
4702
// syscall_runtime_BeforeFork prepares the current thread for fork(2),
// on behalf of the syscall package: block signals so the child does not
// run a Go signal handler before exec, and spoil the stack guard so any
// attempt to grow the stack between fork and exec is caught. Must not
// itself grow the stack.
func syscall_runtime_BeforeFork() {
	gp := getg().m.curg

	// Block signals during a fork, so that the child does not run
	// a signal handler before exec if a signal is sent to the process
	// group. See issue #18600.
	gp.m.locks++
	sigsave(&gp.m.sigmask)
	sigblock(false)

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even try
	// to grow the stack. Here we spoil g.stackguard0 to reliably detect
	// any attempt to grow the stack. runtime_AfterFork will undo this in
	// the parent process, but not in the child.
	gp.stackguard0 = stackFork
}
4719
4720
4721
4722
4723
// syscall_runtime_AfterFork restores, in the parent process, the state
// that syscall_runtime_BeforeFork changed: the stack guard and the
// signal mask. Must not grow the stack.
func syscall_runtime_AfterFork() {
	gp := getg().m.curg

	// See the comments in syscall_runtime_BeforeFork.
	gp.stackguard0 = gp.stack.lo + stackGuard

	msigrestore(gp.m.sigmask)

	gp.m.locks--
}
4734
4735
4736
// inForkedChild is true while we are in the child of a fork system call
// and have not yet called exec; consulted by signal handling code to
// avoid unsafe work in that window.
var inForkedChild bool
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
// syscall_runtime_AfterForkInChild runs in the child process between
// fork and exec: it resets signal handlers to the default and restores
// the signal mask saved before the fork. It must not allocate or grow
// the stack (see syscall_runtime_BeforeFork).
func syscall_runtime_AfterForkInChild() {
	// It's OK to change the global variable inForkedChild here
	// because we are the only goroutine running in the new process.
	// Its only reader is the signal handler in this same process.
	inForkedChild = true

	clearSignalHandlers()

	// When we are the child we are the only thread running,
	// so we know that nothing else has changed gp.m.sigmask.
	msigrestore(getg().m.sigmask)

	inForkedChild = false
}
4765
4766
4767
4768
// pendingPreemptSignals is the number of preemption signals that have
// been sent but not yet received. Only consulted on Darwin/iOS (see
// syscall_runtime_BeforeExec and issue #41702).
var pendingPreemptSignals atomic.Int32
4770
4771
4772
4773
// syscall_runtime_BeforeExec is called from the syscall package before
// an exec: it serializes exec against thread creation via execLock and,
// on Darwin/iOS, waits for in-flight preemption signals to land.
func syscall_runtime_BeforeExec() {
	// Prevent thread creation during exec.
	execLock.lock()

	// On Darwin, wait for all pending preemption signals to
	// be received. See issue #41702.
	if GOOS == "darwin" || GOOS == "ios" {
		for pendingPreemptSignals.Load() > 0 {
			osyield()
		}
	}
}
4786
4787
4788
4789
// syscall_runtime_AfterExec is called after an exec attempt returns;
// it re-allows thread creation.
func syscall_runtime_AfterExec() {
	execLock.unlock()
}
4793
4794
// malg allocates a new g with a stack big enough for stacksize bytes.
// If stacksize is negative, no stack is allocated.
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(stackSystem + stacksize)
		systemstack(func() {
			newg.stack = stackalloc(uint32(stacksize))
		})
		newg.stackguard0 = newg.stack.lo + stackGuard
		newg.stackguard1 = ^uintptr(0)
		// Clear the bottom word of the stack. We record g
		// there on gsignal stack during VDSO on ARM and ARM64.
		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
	}
	return newg
}
4810
4811
4812
4813
// newproc creates a new goroutine running fn and puts it on the current
// P's run queue (favoring the runnext slot). The compiler turns a go
// statement into a call to this.
func newproc(fn *funcval) {
	gp := getg()
	pc := getcallerpc()
	systemstack(func() {
		newg := newproc1(fn, gp, pc, false, waitReasonZero)

		pp := getg().m.p.ptr()
		runqput(pp, newg, true)

		if mainStarted {
			wakep()
		}
	})
}
4828
4829
4830
4831
// newproc1 creates a new goroutine in state _Grunnable (or _Gwaiting
// with the given waitreason, if parked is true), starting at fn.
// callerpc is the address of the go statement that created this;
// callergp is the creating goroutine. The caller is responsible for
// adding the new g to the scheduler.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
	if fn == nil {
		fatal("go of nil func value")
	}

	mp := acquirem() // disable preemption because we hold M and P in local vars.
	pp := mp.p.ptr()
	newg := gfget(pp)
	if newg == nil {
		newg = malg(stackMin)
		casgstatus(newg, _Gidle, _Gdead)
		// Publish with status _Gdead so the GC scanner doesn't look at
		// the uninitialized stack.
		allgadd(newg)
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
	totalSize = alignUp(totalSize, sys.StackAlign)
	sp := newg.stack.hi - totalSize
	if usesLR {
		// caller's LR
		*(*uintptr)(unsafe.Pointer(sp)) = 0
		prepGoExitFrame(sp)
	}
	if GOARCH == "arm64" {
		// caller's FP
		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
	}

	// Set up newg.sched so the goroutine starts in fn and "returns" into
	// goexit when fn returns.
	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.parentGoid = callergp.goid
	newg.gopc = callerpc
	newg.ancestors = saveAncestors(callergp)
	newg.startpc = fn.fn
	if isSystemGoroutine(newg, false) {
		sched.ngsys.Add(1)
	} else {
		// Only user goroutines inherit pprof labels.
		if mp.curg != nil {
			newg.labels = mp.curg.labels
		}
		if goroutineProfile.active {
			// A concurrent goroutine profile is running. It should include
			// exactly the set of goroutines that were alive when the goroutine
			// profiler first stopped the world. That does not include newg, so
			// mark it as not needing a profile before transitioning it from
			// _Gdead.
			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
		}
	}
	// Sample scheduling-latency tracking: 1 in gTrackingPeriod
	// goroutines gets tracked.
	newg.trackingSeq = uint8(cheaprand())
	if newg.trackingSeq%gTrackingPeriod == 0 {
		newg.tracking = true
	}
	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))

	// Get a goid and switch to runnable. Make all this atomic to the tracer.
	trace := traceAcquire()
	var status uint32 = _Grunnable
	if parked {
		status = _Gwaiting
		newg.waitreason = waitreason
	}
	casgstatus(newg, _Gdead, status)
	if pp.goidcache == pp.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
		pp.goidcache -= _GoidCacheBatch - 1
		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
	}
	newg.goid = pp.goidcache
	pp.goidcache++
	newg.trace.reset()
	if trace.ok() {
		trace.GoCreate(newg, newg.startpc, parked)
		traceRelease(trace)
	}

	// Set up race context.
	if raceenabled {
		newg.racectx = racegostart(callerpc)
		newg.raceignore = 0
		if newg.labels != nil {
			// labelSync synchronizes the label write above with reads
			// elsewhere (e.g. in the signal handler) for the race detector.
			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
		}
	}
	releasem(mp)

	return newg
}
4937
4938
4939
4940
// saveAncestors copies the previous ancestors of the given caller g and
// includes info for the caller itself, producing the ancestor traceback
// set for a g being created. Returns nil when ancestor tracking is
// disabled (GODEBUG=tracebackancestors=0).
func saveAncestors(callergp *g) *[]ancestorInfo {
	// Copy all prior info, except for the root goroutine (goid 0).
	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
		return nil
	}
	var callerAncestors []ancestorInfo
	if callergp.ancestors != nil {
		callerAncestors = *callergp.ancestors
	}
	// Cap the total number of remembered ancestors at
	// debug.tracebackancestors; newer entries displace the oldest.
	n := int32(len(callerAncestors)) + 1
	if n > debug.tracebackancestors {
		n = debug.tracebackancestors
	}
	ancestors := make([]ancestorInfo, n)
	copy(ancestors[1:], callerAncestors)

	var pcs [tracebackInnerFrames]uintptr
	npcs := gcallers(callergp, 0, pcs[:])
	ipcs := make([]uintptr, npcs)
	copy(ipcs, pcs[:])
	ancestors[0] = ancestorInfo{
		pcs:  ipcs,
		goid: callergp.goid,
		gopc: callergp.gopc,
	}

	ancestorsp := new([]ancestorInfo)
	*ancestorsp = ancestors
	return ancestorsp
}
4971
4972
4973
// gfput puts a dead goroutine on pp's local free list, freeing its
// stack first if it is not the standard starting size. When the local
// list grows to 64, a batch of 32 is moved to the global free list.
func gfput(pp *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stack.hi - gp.stack.lo

	if stksize != uintptr(startingStackSize) {
		// non-standard stack size - free it.
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
	}

	pp.gFree.push(gp)
	pp.gFree.n++
	if pp.gFree.n >= 64 {
		var (
			inc      int32
			stackQ   gQueue
			noStackQ gQueue
		)
		// Split the spill-over batch by whether each g still has a
		// stack, to match the two global lists.
		for pp.gFree.n >= 32 {
			gp := pp.gFree.pop()
			pp.gFree.n--
			if gp.stack.lo == 0 {
				noStackQ.push(gp)
			} else {
				stackQ.push(gp)
			}
			inc++
		}
		lock(&sched.gFree.lock)
		sched.gFree.noStack.pushAll(noStackQ)
		sched.gFree.stack.pushAll(stackQ)
		sched.gFree.n += inc
		unlock(&sched.gFree.lock)
	}
}
5014
5015
5016
// gfget gets a goroutine from the free list, refilling pp's local list
// from the global one when it is empty. Returns nil if no free g is
// cached. The returned g has a usable startingStackSize stack.
func gfget(pp *p) *g {
retry:
	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
		lock(&sched.gFree.lock)
		// Move a batch of free Gs to the P.
		for pp.gFree.n < 32 {
			// Prefer Gs with stacks.
			gp := sched.gFree.stack.pop()
			if gp == nil {
				gp = sched.gFree.noStack.pop()
				if gp == nil {
					break
				}
			}
			sched.gFree.n--
			pp.gFree.push(gp)
			pp.gFree.n++
		}
		unlock(&sched.gFree.lock)
		goto retry
	}
	gp := pp.gFree.pop()
	if gp == nil {
		return nil
	}
	pp.gFree.n--
	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
		// Deallocate old stack. We kept it in gfput because it was the
		// right size when the goroutine was put on the free list, but
		// the right size has changed since then.
		systemstack(func() {
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stackguard0 = 0
		})
	}
	if gp.stack.lo == 0 {
		// Stack was deallocated in gfput or just above. Allocate a new one.
		systemstack(func() {
			gp.stack = stackalloc(startingStackSize)
		})
		gp.stackguard0 = gp.stack.lo + stackGuard
	} else {
		// Reusing an existing stack: tell the sanitizers it is fresh.
		if raceenabled {
			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if msanenabled {
			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
		if asanenabled {
			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
		}
	}
	return gp
}
5073
5074
// gfpurge moves all of pp's cached dead goroutines to the global free
// list, split by whether each still has a stack. Used when a P is
// destroyed.
func gfpurge(pp *p) {
	var (
		inc      int32
		stackQ   gQueue
		noStackQ gQueue
	)
	for !pp.gFree.empty() {
		gp := pp.gFree.pop()
		pp.gFree.n--
		if gp.stack.lo == 0 {
			noStackQ.push(gp)
		} else {
			stackQ.push(gp)
		}
		inc++
	}
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(noStackQ)
	sched.gFree.stack.pushAll(stackQ)
	sched.gFree.n += inc
	unlock(&sched.gFree.lock)
}
5097
5098
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
5102
5103
5104
5105
5106
5107
// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
func dolockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	gp.m.lockedg.set(gp)
	gp.lockedm.set(gp.m)
}
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
// LockOSThread wires the calling goroutine to its current operating
// system thread. The calling goroutine will always execute in that
// thread, and no other goroutine will execute in it, until the calling
// goroutine has made as many calls to [UnlockOSThread] as to
// LockOSThread. If the calling goroutine exits without unlocking the
// thread, the thread will be terminated.
//
// A goroutine should call LockOSThread before calling OS services or
// non-Go library functions that depend on per-thread state.
func LockOSThread() {
	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
		// If we need to start a new thread from the locked
		// thread, we need the template thread. Start it now
		// while we're in a known-good state.
		startTemplateThread()
	}
	gp := getg()
	gp.m.lockedExt++
	if gp.m.lockedExt == 0 {
		// The counter wrapped; undo and report the overflow.
		gp.m.lockedExt--
		panic("LockOSThread nesting overflow")
	}
	dolockOSThread()
}
5148
5149
// lockOSThread is the runtime-internal counterpart of LockOSThread;
// internal locks (lockedInt) are counted separately from external ones.
func lockOSThread() {
	getg().m.lockedInt++
	dolockOSThread()
}
5154
5155
5156
5157
5158
5159
// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. It only unwires the goroutine once both
// the internal and external lock counts reach zero. Do not allow
// preemption during this call, or else the m might be different in this
// function than in the caller.
func dounlockOSThread() {
	if GOARCH == "wasm" {
		return // no threads on wasm yet
	}
	gp := getg()
	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
		return
	}
	gp.m.lockedg = 0
	gp.lockedm = 0
}
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
// UnlockOSThread undoes an earlier call to LockOSThread.
// If this drops the number of active LockOSThread calls on the calling
// goroutine to zero, it unwires the calling goroutine from its fixed
// operating system thread.
// If there are no active LockOSThread calls, this is a no-op.
//
// Before calling UnlockOSThread, the caller must ensure that the OS
// thread is suitable for running other goroutines. If the caller made
// any permanent changes to the state of the thread that would affect
// other goroutines, it should not call this function and thus leave the
// goroutine locked to the OS thread until the goroutine (and hence the
// thread) exits.
func UnlockOSThread() {
	gp := getg()
	if gp.m.lockedExt == 0 {
		return
	}
	gp.m.lockedExt--
	dounlockOSThread()
}
5194
5195
// unlockOSThread is the runtime-internal counterpart of UnlockOSThread.
// An unbalanced call (lockedInt already zero) is an internal error.
func unlockOSThread() {
	gp := getg()
	if gp.m.lockedInt == 0 {
		systemstack(badunlockosthread)
	}
	gp.m.lockedInt--
	dounlockOSThread()
}
5204
// badunlockosthread reports a mismatched lockOSThread/unlockOSThread pair.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
5208
// gcount returns the number of live user goroutines: all g's minus the
// free lists and system goroutines. It is computed without locks, so
// the result is approximate.
func gcount() int32 {
	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
	for _, pp := range allp {
		n -= pp.gFree.n
	}

	// All these variables can be changed concurrently, so the result can
	// be inconsistent. But at least the current goroutine is running.
	if n < 1 {
		n = 1
	}
	return n
}
5222
// mcount returns the number of Ms that have been created and not freed.
func mcount() int32 {
	return int32(sched.mnext - sched.nmfreed)
}
5226
// prof holds the state shared between setcpuprofilerate and the SIGPROF
// handler (sigprof).
var prof struct {
	signalLock atomic.Uint32

	// Must hold signalLock to write. Reads may be lock-free, but
	// signalLock should be taken to synchronize with changes.
	hz atomic.Int32
}
5234
// Fake functions whose PCs are used by sigprof to attribute profiling
// samples that cannot be symbolized to a real Go frame. Each calls
// itself so the linker keeps a distinct, non-empty body for it.
func _System()                    { _System() }
func _ExternalCode()              { _ExternalCode() }
func _LostExternalCode()          { _LostExternalCode() }
func _GC()                        { _GC() }
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _LostContendedRuntimeLock()  { _LostContendedRuntimeLock() }
func _VDSO()                      { _VDSO() }
5242
5243
5244
5245
5246
// sigprof records a CPU profiling sample. It is called by the signal
// handler for the profiling clock tick, so it must not allocate or
// acquire locks. pc/sp/lr describe the interrupted context; gp and mp
// identify the interrupted goroutine and thread (either may be nil —
// see the notes below).
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz.Load() == 0 {
		return
	}

	// If mp.profilehz is 0, then profiling is not enabled for this thread.
	// We must check this to avoid a deadlock between setcpuprofilerate
	// and the call to cpuprof.add, below.
	if mp != nil && mp.profilehz == 0 {
		return
	}

	// On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in
	// internal/runtime/atomic. If SIGPROF arrives while the program is inside
	// the critical section, it creates a deadlock (when writing the sample).
	// As a workaround, count such SIGPROFs in cpuprof.lostAtomic and report
	// them later (attributed to _LostSIGPROFDuringAtomic64).
	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
		if f := findfunc(pc); f.valid() {
			if hasPrefix(funcname(f), "internal/runtime/atomic") {
				cpuprof.lostAtomic++
				return
			}
		}
		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
			// internal/runtime/atomic functions call into kernel
			// helpers on arm < 7. See
			// internal/runtime/atomic/sys_linux_arm.s.
			cpuprof.lostAtomic++
			return
		}
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	// Set a trap in case the code does allocate.
	// Note that on windows, one thread takes profiles of all the
	// other threads, so mp is usually not getg().m.
	// In fact mp may not even be stopped.
	// See golang.org/issue/17165.
	getg().m.mallocing++

	var u unwinder
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		cgoOff := 0
		// Check cgoCallersUse to make sure that we are not
		// interrupting other code that is fiddling with
		// cgoCallers.  We are running in a signal handler
		// with all signals blocked, so we don't have to worry
		// about any other code interrupting us.
		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
				cgoOff++
			}
			n += copy(stk[:], mp.cgoCallers[:cgoOff])
			mp.cgoCallers[0] = 0
		}

		// Collect Go stack that leads to the cgo call.
		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
		// Libcall, i.e. runtime syscall on windows.
		// Collect Go stack that leads to the call.
		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
	} else if mp != nil && mp.vdsoSP != 0 {
		// VDSO call, e.g. nanotime on Linux.
		// Collect Go stack that leads to the call.
		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
	} else {
		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
	}
	n += tracebackPCs(&u, 0, stk[n:])

	if n <= 0 {
		// Normal traceback is impossible or has failed.
		// Account it against abstract "System" or "GC".
		n = 2
		if inVDSOPage(pc) {
			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
		} else if pc > firstmoduledata.etext {
			// "ExternalCode" is better than "etext".
			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
		}
		stk[0] = pc
		if mp.preemptoff != "" {
			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
		} else {
			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
		}
	}

	if prof.hz.Load() != 0 {
		// Note: it can happen on Windows that we interrupted a system thread
		// with no g, so gp could be nil. The other nil checks are done out of
		// caution, but not expected to be nil in practice.
		var tagPtr *unsafe.Pointer
		if gp != nil && gp.m != nil && gp.m.curg != nil {
			tagPtr = &gp.m.curg.labels
		}
		cpuprof.add(tagPtr, stk[:n])

		// Also feed the sample to the execution tracer, attributed to the
		// user goroutine (curg) when one exists.
		gprof := gp
		var mp *m
		var pp *p
		if gp != nil && gp.m != nil {
			if gp.m.curg != nil {
				gprof = gp.m.curg
			}
			mp = gp.m
			pp = gp.m.p.ptr()
		}
		traceCPUSample(gprof, mp, pp, stk[:n])
	}
	getg().m.mallocing--
}
5364
5365
5366
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, it turns off CPU profiling.
func setcpuprofilerate(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption, otherwise we can be rescheduled to another thread
	// that has profiling enabled.
	gp := getg()
	gp.m.locks++

	// Stop profiler on this thread so that it is safe to lock prof.
	// If a profiling signal came in while we had prof locked,
	// it would deadlock.
	setThreadCPUProfiler(0)

	for !prof.signalLock.CompareAndSwap(0, 1) {
		osyield()
	}
	if prof.hz.Load() != hz {
		setProcessCPUProfiler(hz)
		prof.hz.Store(hz)
	}
	prof.signalLock.Store(0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	if hz != 0 {
		setThreadCPUProfiler(hz)
	}

	gp.m.locks--
}
5402
5403
5404
// init initializes pp, which may be a freshly allocated p or a
// previously destroyed p, and transitions it to status _Pgcstop.
func (pp *p) init(id int32) {
	pp.id = id
	pp.status = _Pgcstop
	pp.sudogcache = pp.sudogbuf[:0]
	pp.deferpool = pp.deferpoolbuf[:0]
	pp.wbBuf.reset()
	if pp.mcache == nil {
		if id == 0 {
			if mcache0 == nil {
				throw("missing mcache?")
			}
			// Use the bootstrap mcache0. Only one P will get
			// mcache0: the one with ID 0.
			pp.mcache = mcache0
		} else {
			pp.mcache = allocmcache()
		}
	}
	if raceenabled && pp.raceprocctx == 0 {
		if id == 0 {
			pp.raceprocctx = raceprocctx0
			raceprocctx0 = 0 // bootstrap
		} else {
			pp.raceprocctx = raceproccreate()
		}
	}
	lockInit(&pp.timers.mu, lockRankTimers)

	// This P may get timers when it starts running. Set the mask here
	// since the P may not go through pidleget (notably P 0 on startup).
	timerpMask.set(id)
	// Similarly, we may not go through pidleget before this P starts
	// running if it is P 0 on startup.
	idlepMask.clear(id)
}
5440
5441
5442
5443
5444
// destroy releases all of the resources associated with pp and
// transitions it to status _Pdead.
//
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	// Move all runnable goroutines to the global queue.
	for pp.runqhead != pp.runqtail {
		// Pop from tail of local queue.
		pp.runqtail--
		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
		// Push onto head of global queue.
		globrunqputhead(gp)
	}
	if pp.runnext != 0 {
		globrunqputhead(pp.runnext.ptr())
		pp.runnext = 0
	}

	// Move all timers to the local P.
	getg().m.p.ptr().timers.take(&pp.timers)

	// Flush p's write barrier buffer.
	if gcphase != _GCoff {
		wbBufFlush1(pp)
		pp.gcw.dispose()
	}
	// Drop cached sudogs and defers so they can be collected.
	for i := range pp.sudogbuf {
		pp.sudogbuf[i] = nil
	}
	pp.sudogcache = pp.sudogbuf[:0]
	pp.pinnerCache = nil
	for j := range pp.deferpoolbuf {
		pp.deferpoolbuf[j] = nil
	}
	pp.deferpool = pp.deferpoolbuf[:0]
	systemstack(func() {
		for i := 0; i < pp.mspancache.len; i++ {
			// Safe to call since the world is stopped.
			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		}
		pp.mspancache.len = 0
		lock(&mheap_.lock)
		pp.pcache.flush(&mheap_.pages)
		unlock(&mheap_.lock)
	})
	freemcache(pp.mcache)
	pp.mcache = nil
	gfpurge(pp)
	traceProcFree(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector code uses a callback to fetch
			// the proc context, so arrange for that callback
			// to see the right thing.
			// This hack only works because we are the only
			// thread running.
			mp := getg().m
			phold := mp.p.ptr()
			mp.p.set(pp)

			racectxend(pp.timers.raceCtx)
			pp.timers.raceCtx = 0

			mp.p.set(phold)
		}
		raceprocdestroy(pp.raceprocctx)
		pp.raceprocctx = 0
	}
	pp.gcAssistTime = 0
	pp.status = _Pdead
}
5515
5516
5517
5518
5519
5520
5521
5522
5523
// procresize changes the number of processors (GOMAXPROCS).
//
// sched.lock must be held, and the world must be stopped.
//
// gcworkbufs must not be being modified by either the GC or the write
// barrier code, so the GC must not be running if the number of Ps
// actually changes.
//
// Returns the list of Ps with local work; they need to be scheduled by
// the caller.
func procresize(nprocs int32) *p {
	assertLockHeld(&sched.lock)
	assertWorldStopped()

	old := gomaxprocs
	if old < 0 || nprocs <= 0 {
		throw("procresize: invalid arg")
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.Gomaxprocs(nprocs)
		traceRelease(trace)
	}

	// update statistics
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	maskWords := (nprocs + 31) / 32

	// Grow allp if necessary.
	if nprocs > int32(len(allp)) {
		// Synchronize with retake, which could be running
		// concurrently since it doesn't run on a P.
		lock(&allpLock)
		if nprocs <= int32(cap(allp)) {
			allp = allp[:nprocs]
		} else {
			nallp := make([]*p, nprocs)
			// Copy everything up to allp's cap so we
			// never lose old allocated Ps.
			copy(nallp, allp[:cap(allp)])
			allp = nallp
		}

		if maskWords <= int32(cap(idlepMask)) {
			idlepMask = idlepMask[:maskWords]
			timerpMask = timerpMask[:maskWords]
		} else {
			nidlepMask := make([]uint32, maskWords)
			// No need to copy beyond len, old Ps are irrelevant.
			copy(nidlepMask, idlepMask)
			idlepMask = nidlepMask

			ntimerpMask := make([]uint32, maskWords)
			copy(ntimerpMask, timerpMask)
			timerpMask = ntimerpMask
		}
		unlock(&allpLock)
	}

	// initialize new P's
	for i := old; i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
		}
		pp.init(i)
		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
	}

	gp := getg()
	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
		// continue to use the current P
		gp.m.p.ptr().status = _Prunning
		gp.m.p.ptr().mcache.prepareForSweep()
	} else {
		// release the current P and acquire allp[0].
		//
		// We must do this before destroying our current P
		// because p.destroy itself has write barriers, so we
		// need to do that from a valid P.
		if gp.m.p != 0 {
			trace := traceAcquire()
			if trace.ok() {
				// Pretend that we were descheduled
				// and then scheduled again to keep
				// the trace consistent.
				trace.GoSched()
				trace.ProcStop(gp.m.p.ptr())
				traceRelease(trace)
			}
			gp.m.p.ptr().m = 0
		}
		gp.m.p = 0
		pp := allp[0]
		pp.m = 0
		pp.status = _Pidle
		acquirep(pp)
		trace := traceAcquire()
		if trace.ok() {
			trace.GoStart()
			traceRelease(trace)
		}
	}

	// g.m.p is now set, so we no longer need mcache0 for bootstrapping.
	mcache0 = nil

	// release resources from unused P's
	for i := nprocs; i < old; i++ {
		pp := allp[i]
		pp.destroy()
		// can't free P itself because it can be referenced by an M in syscall
	}

	// Trim allp.
	if int32(len(allp)) != nprocs {
		lock(&allpLock)
		allp = allp[:nprocs]
		idlepMask = idlepMask[:maskWords]
		timerpMask = timerpMask[:maskWords]
		unlock(&allpLock)
	}

	// Build the list of Ps with local work; put the rest on the idle list.
	var runnablePs *p
	for i := nprocs - 1; i >= 0; i-- {
		pp := allp[i]
		if gp.m.p.ptr() == pp {
			continue
		}
		pp.status = _Pidle
		if runqempty(pp) {
			pidleput(pp, now)
		} else {
			pp.m.set(mget())
			pp.link.set(runnablePs)
			runnablePs = pp
		}
	}
	stealOrder.reset(uint32(nprocs))
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	if old != nprocs {
		// Notify the limiter that the amount of procs has changed.
		gcCPULimiter.resetCapacity(now, nprocs)
	}
	return runnablePs
}
5666
5667
5668
5669
5670
5671
5672
// acquirep associates the current M with pp: wirep does the part that
// must avoid write barriers, and the rest (mcache sweep preparation and
// the trace ProcStart event) runs once the P is attached.
func acquirep(pp *p) {
	// Do the part that isn't allowed to have write barriers.
	wirep(pp)

	// Have p; write barriers now allowed.

	// Perform deferred mcache flush before this P can allocate
	// from a potentially stale mcache.
	pp.mcache.prepareForSweep()

	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStart()
		traceRelease(trace)
	}
}
5689
5690
5691
5692
5693
5694
5695
// wirep is the first step of acquirep, which actually associates the
// current M to pp. This is broken out so we can disallow write barriers
// for this part, since we don't yet have a P.
func wirep(pp *p) {
	gp := getg()

	if gp.m.p != 0 {
		// Call on the systemstack to avoid a nosplit overflow build
		// failure on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			throw("wirep: already in go")
		})
	}
	if pp.m != 0 || pp.status != _Pidle {
		// Call on the systemstack to avoid a nosplit overflow build
		// failure on some platforms when built with -N -l. See #64113.
		systemstack(func() {
			id := int64(0)
			if pp.m != 0 {
				id = pp.m.ptr().id
			}
			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
			throw("wirep: invalid p state")
		})
	}
	gp.m.p.set(pp)
	pp.m.set(gp.m)
	pp.status = _Prunning
}
5722
5723
// releasep disassociates the current P from the current M and returns
// it, emitting a ProcStop trace event first.
func releasep() *p {
	trace := traceAcquire()
	if trace.ok() {
		trace.ProcStop(getg().m.p.ptr())
		traceRelease(trace)
	}
	return releasepNoTrace()
}
5732
5733
// releasepNoTrace dissociates the current M from its P without
// emitting a trace event and returns the now-idle P. The M must hold
// a P in _Prunning that points back at this M; anything else is a
// fatal inconsistency.
func releasepNoTrace() *p {
	gp := getg()

	if gp.m.p == 0 {
		throw("releasep: invalid arg")
	}
	pp := gp.m.p.ptr()
	if pp.m.ptr() != gp.m || pp.status != _Prunning {
		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
		throw("releasep: invalid p state")
	}
	// Unlink both directions and mark the P idle.
	gp.m.p = 0
	pp.m = 0
	pp.status = _Pidle
	return pp
}
5750
// incidlelocked adjusts sched.nmidlelocked, the count of locked Ms
// that are idle, by v (+1 before such an M blocks on its locked
// goroutine, -1 after it wakes). When the count increases, the system
// may have become deadlocked, so checkdead runs.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}
5759
5760
5761
5762
// checkdead detects system-wide deadlock: if no goroutine can ever
// run again it terminates the process with a fatal error. Called
// whenever an M is about to become idle with sched.lock held.
func checkdead() {
	assertLockHeld(&sched.lock)

	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines; the calling host program is
	// assumed to be running.
	if islibrary || isarchive {
		return
	}

	// If we're dying because of a panic/signal, freezetheworld blocks
	// all running threads and the state would look like a deadlock,
	// except a thread is about to call exit. Don't report it.
	if panicking.Load() > 0 {
		return
	}

	// If not running under cgo but an extra M exists (possible on
	// Windows via syscall.NewCallback), account for it as one
	// permissible "running" M.
	var run0 int32
	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
		run0 = 1
	}

	// Ms doing real work = all Ms minus idle, idle-locked, and system Ms.
	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
	if run > run0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
		unlock(&sched.lock)
		throw("checkdead: inconsistent counts")
	}

	// Count user goroutines that are blocked but could in principle
	// wake (_Gwaiting, _Gpreempted). Any runnable/running/syscall G
	// at this point contradicts run == 0 and is a fatal bug.
	grunning := 0
	forEachG(func(gp *g) {
		if isSystemGoroutine(gp, false) {
			return
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting,
			_Gpreempted:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			unlock(&sched.lock)
			throw("checkdead: runnable g")
		}
	})
	// No blocked user goroutines at all: main must have called
	// runtime.Goexit. Unlock first so the fatal path can't self-deadlock.
	if grunning == 0 {
		unlock(&sched.lock)
		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Under faketime (playground), jump the clock forward to the next
	// timer and wake an M/P pair to run it instead of deadlocking.
	if faketime != 0 {
		if when := timeSleepUntil(); when < maxWhen {
			faketime = when

			// Start an M to steal the timer.
			pp, _ := pidleget(faketime)
			if pp == nil {
				// There should always be a free P since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no p for timer")
			}
			mp := mget()
			if mp == nil {
				// There should always be a free M since
				// nothing is running.
				unlock(&sched.lock)
				throw("checkdead: no m for timer")
			}
			// The M must be spinning to steal; set it explicitly
			// (it would become spinning on its own anyway, being
			// the only M).
			sched.nmspinning.Add(1)
			mp.spinning = true
			mp.nextp.set(pp)
			notewakeup(&mp.park)
			return
		}
	}

	// A pending timer on any P can still wake a goroutine — not dead.
	for _, pp := range allp {
		if len(pp.timers.heap) > 0 {
			return
		}
	}

	unlock(&sched.lock)
	fatal("all goroutines are asleep - deadlock!")
}
5864
5865
5866
5867
5868
5869
// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If this long passes without a GC, sysmon forces one
// (see the gcTriggerTime test in sysmon).
var forcegcperiod int64 = 2 * 60 * 1e9

// needSysmonWorkaround is set when the NetBSD timer workaround in
// sysmon (the GOOS == "netbsd" branch) is required.
var needSysmonWorkaround bool = false

// haveSysmon indicates whether there is sysmon thread support.
// There is no sysmon on wasm.
const haveSysmon = GOARCH != "wasm"
5880
5881
5882
5883
// sysmon is the system monitor, running forever on a dedicated M
// without a P. It periodically: polls the network if nobody else has
// recently, retakes Ps blocked in syscalls and preempts long-running
// Gs (via retake), wakes the scavenger, and triggers time-based
// forced GCs. Because it has no P, write barriers are not allowed.
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	checkdead()
	unlock(&sched.lock)

	lasttrace := int64(0)
	idle := 0 // consecutive cycles in which nothing was woken up
	delay := uint32(0)

	for {
		// Adaptive sleep: 20us while busy, doubling after 50 idle
		// cycles, capped at 10ms.
		if idle == 0 {
			delay = 20
		} else if idle > 50 {
			delay *= 2
		}
		if delay > 10*1000 {
			delay = 10 * 1000
		}
		usleep(delay)

		// If the world is stopped for GC or all Ps are idle (and
		// schedtrace isn't keeping us awake), go into deep sleep on
		// sysmonnote until a timer is due or a syscall wakes us.
		// The condition is re-checked under sched.lock to close the
		// race with a wakeup between the check and the sleep.
		now := nanotime()
		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
			lock(&sched.lock)
			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
				syscallWake := false
				next := timeSleepUntil()
				if next > now {
					sched.sysmonwait.Store(true)
					unlock(&sched.lock)
					// Sleep at most half the forced-GC period so
					// time-based GC triggering stays timely, and no
					// longer than the next timer.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					// Only enter OS timer-relaxed mode for long sleeps.
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					syscallWake = notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					lock(&sched.lock)
					sched.sysmonwait.Store(false)
					noteclear(&sched.sysmonnote)
				}
				if syscallWake {
					idle = 0
					delay = 20
				}
			}
			unlock(&sched.lock)
		}

		lock(&sched.sysmonlock)
		// Refresh now: we may have slept on sysmonnote or blocked on
		// the locks above for a long time.
		now = nanotime()

		// Trigger libc interceptors if needed.
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		// Poll the network if it hasn't been polled for more than 10ms.
		lastpoll := sched.lastpoll.Load()
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			sched.lastpoll.CompareAndSwap(lastpoll, now)
			list, delta := netpoll(0) // non-blocking; returns a list of goroutines
			if !list.empty() {
				// Pretend one more M is running while injecting the
				// list so checkdead (via incidlelocked) doesn't see a
				// transient "deadlocked" state.
				incidlelocked(-1)
				injectglist(&list)
				incidlelocked(1)
				netpollAdjustWaiters(delta)
			}
		}
		if GOOS == "netbsd" && needSysmonWorkaround {
			// Workaround for a NetBSD kernel bug: if a timer is
			// already due, kick an M so the timer actually fires
			// rather than waiting on a netpoll that may never wake.
			if next := timeSleepUntil(); next < now {
				startm(nil, false, false)
			}
		}
		if scavenger.sysmonWake.Load() != 0 {
			// Kick the scavenger awake if someone requested it.
			scavenger.wake()
		}
		// Retake Ps blocked in syscalls and preempt long-running Gs.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// Check if we need to force a (time-based) GC.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
			lock(&forcegc.lock)
			forcegc.idle.Store(false)
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		// Emit a scheduler trace if GODEBUG=schedtrace is due.
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
6030
// sysmontick records the last per-P tick values sysmon observed, used
// by retake to notice when a P has been stuck on the same scheduling
// or syscall tick for too long.
type sysmontick struct {
	schedtick   uint32 // p.schedtick value last observed by retake
	syscalltick uint32 // p.syscalltick value last observed by retake
	schedwhen   int64  // time at which schedtick was observed
	syscallwhen int64  // time at which syscalltick was observed
}

// forcePreemptNS is the time slice a G may run before retake requests
// its preemption (10ms).
const forcePreemptNS = 10 * 1000 * 1000
6041
// retake, called by sysmon, retakes Ps that have been blocked in a
// syscall for at least one sysmon tick and requests preemption of Gs
// that have run on the same schedtick for longer than forcePreemptNS.
// It returns the number of Ps retaken from syscalls.
func retake(now int64) uint32 {
	n := 0
	// Prevent allp from changing underneath us. This lock is
	// essentially uncontended unless a stop-the-world is in progress.
	lock(&allpLock)
	// Can't use range over allp: allpLock is dropped inside the loop,
	// so allp must be re-read each iteration.
	for i := 0; i < len(allp); i++ {
		pp := allp[i]
		if pp == nil {
			// Possible if procresize has grown allp but not yet
			// created the new Ps.
			continue
		}
		pd := &pp.sysmontick
		s := pp.status
		sysretake := false
		if s == _Prunning || s == _Psyscall {
			// Preempt the G if the P has run on the same schedtick
			// for too long (a single long-running goroutine, or a
			// chain run via runnext sharing one time slice).
			t := int64(pp.schedtick)
			if int64(pd.schedtick) != t {
				// Tick advanced since last look: restart the clock.
				pd.schedtick = uint32(t)
				pd.schedwhen = now
			} else if pd.schedwhen+forcePreemptNS <= now {
				preemptone(pp)
				// In a syscall preemptone() can't help (no M is
				// wired to the P), so fall through to retake below.
				sysretake = true
			}
		}
		if s == _Psyscall {
			// Retake the P if it's been in a syscall for more than
			// one sysmon tick (at least 20us).
			t := int64(pp.syscalltick)
			if !sysretake && int64(pd.syscalltick) != t {
				pd.syscalltick = uint32(t)
				pd.syscallwhen = now
				continue
			}
			// Don't retake when there's no other work to do, but do
			// retake eventually so the P doesn't keep sysmon out of
			// deep sleep forever.
			if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
				continue
			}
			// Drop allpLock so we can take sched.lock (via
			// incidlelocked/handoffp) without violating lock order.
			unlock(&allpLock)
			// Decrement idle-locked Ms (pretend one more is running)
			// before the CAS; otherwise the M we retake from could
			// exit its syscall, bump nmidle, and trip checkdead.
			incidlelocked(-1)
			trace := traceAcquire()
			if atomic.Cas(&pp.status, s, _Pidle) {
				if trace.ok() {
					trace.ProcSteal(pp, false)
					traceRelease(trace)
				}
				n++
				pp.syscalltick++
				handoffp(pp)
			} else if trace.ok() {
				traceRelease(trace)
			}
			incidlelocked(1)
			lock(&allpLock)
		}
	}
	unlock(&allpLock)
	return uint32(n)
}
6116
6117
6118
6119
6120
6121
6122 func preemptall() bool {
6123 res := false
6124 for _, pp := range allp {
6125 if pp.status != _Prunning {
6126 continue
6127 }
6128 if preemptone(pp) {
6129 res = true
6130 }
6131 }
6132 return res
6133 }
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
// preemptone tells the goroutine running on processor pp to stop.
// Purely best-effort: it can fail to inform the goroutine, inform the
// wrong one, or be ignored. No lock needs to be held.
// Reports whether a preemption request was issued.
func preemptone(pp *p) bool {
	mp := pp.m.ptr()
	// Can't preempt an unowned P or our own M.
	if mp == nil || mp == getg().m {
		return false
	}
	gp := mp.curg
	// Nothing to preempt if the M is running its g0 (runtime code).
	if gp == nil || gp == mp.g0 {
		return false
	}

	gp.preempt = true

	// Every call checks for stack overflow by comparing the stack
	// pointer against gp.stackguard0. Setting stackguard0 to
	// stackPreempt folds the preemption request into the normal
	// stack-overflow check at the next function call.
	gp.stackguard0 = stackPreempt

	// Additionally request an asynchronous (signal-based) preemption
	// of this P, unless disabled.
	if preemptMSupported && debug.asyncpreemptoff == 0 {
		pp.preempt = true
		preemptM(mp)
	}

	return true
}
6171
// starttime is the time of the first schedtrace call; the "SCHED Nms"
// timestamps are relative to it.
var starttime int64

// schedtrace prints a snapshot of scheduler state: global counters
// and per-P run queue lengths, plus per-M and per-G detail when
// detailed is set. Driven by sysmon when GODEBUG=schedtrace is set.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
	}
	// Careful: even holding sched.lock, most P/M/G data can change
	// concurrently (e.g. p.m can flip to nil), so reads below are
	// tolerant rather than consistent.
	for i, pp := range allp {
		mp := pp.m.ptr()
		h := atomic.Load(&pp.runqhead)
		t := atomic.Load(&pp.runqtail)
		if detailed {
			print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
			if mp != nil {
				print(mp.id)
			} else {
				print("nil")
			}
			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
		} else {
			// Compact mode: print per-P run queue lengths as
			// [len0 len1 ... lenN].
			print(" ")
			if i == 0 {
				print("[")
			}
			print(t - h)
			if i == len(allp)-1 {
				print("]\n")
			}
		}
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// Per-M detail.
	for mp := allm; mp != nil; mp = mp.alllink {
		pp := mp.p.ptr()
		print(" M", mp.id, ": p=")
		if pp != nil {
			print(pp.id)
		} else {
			print("nil")
		}
		print(" curg=")
		if mp.curg != nil {
			print(mp.curg.goid)
		} else {
			print("nil")
		}
		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
		if lockedg := mp.lockedg.ptr(); lockedg != nil {
			print(lockedg.goid)
		} else {
			print("nil")
		}
		print("\n")
	}

	// Per-G detail.
	forEachG(func(gp *g) {
		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
		if gp.m != nil {
			print(gp.m.id)
		} else {
			print("nil")
		}
		print(" lockedm=")
		if lockedm := gp.lockedm.ptr(); lockedm != nil {
			print(lockedm.id)
		} else {
			print("nil")
		}
		print("\n")
	})
	unlock(&sched.lock)
}
6259
6260
6261
6262
6263
6264
// schedEnableUser enables or disables scheduling of user goroutines.
// It does not stop already-running user goroutines, so callers should
// stop the world first when disabling. When re-enabling, all parked
// user goroutines are moved to the global run queue and idle Ms are
// started to pick them up.
func schedEnableUser(enable bool) {
	lock(&sched.lock)
	if sched.disable.user == !enable {
		// Already in the requested state.
		unlock(&sched.lock)
		return
	}
	sched.disable.user = !enable
	if enable {
		// Release everything queued while disabled.
		n := sched.disable.n
		sched.disable.n = 0
		globrunqputbatch(&sched.disable.runnable, n)
		unlock(&sched.lock)
		// Wake up to n idle Ms to run the released goroutines.
		for ; n != 0 && sched.npidle.Load() != 0; n-- {
			startm(nil, false, false)
		}
	} else {
		unlock(&sched.lock)
	}
}
6284
6285
6286
6287
6288
// schedEnabled reports whether gp should be scheduled. When user
// scheduling is disabled, only system goroutines may run.
//
// sched.lock must be held.
func schedEnabled(gp *g) bool {
	assertLockHeld(&sched.lock)

	if sched.disable.user {
		return isSystemGoroutine(gp, true)
	}
	return true
}
6297
6298
6299
6300
6301
6302
// mput puts mp on the idle M list (sched.midle).
//
// sched.lock must be held. An M going idle may expose a deadlock, so
// checkdead runs afterward.
func mput(mp *m) {
	assertLockHeld(&sched.lock)

	mp.schedlink = sched.midle
	sched.midle.set(mp)
	sched.nmidle++
	checkdead()
}
6311
6312
6313
6314
6315
6316
// mget pops an M from the idle M list (sched.midle), or returns nil
// if none is available.
//
// sched.lock must be held.
func mget() *m {
	assertLockHeld(&sched.lock)

	mp := sched.midle.ptr()
	if mp != nil {
		sched.midle = mp.schedlink
		sched.nmidle--
	}
	return mp
}
6327
6328
6329
6330
6331
6332
// globrunqput puts gp on the tail of the global runnable queue.
//
// sched.lock must be held.
func globrunqput(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBack(gp)
	sched.runqsize++
}

// globrunqputhead puts gp at the head of the global runnable queue.
//
// sched.lock must be held.
func globrunqputhead(gp *g) {
	assertLockHeld(&sched.lock)

	sched.runq.push(gp)
	sched.runqsize++
}
6351
6352
6353
6354
6355
6356
6357
// globrunqputbatch appends a whole batch of n runnable goroutines to
// the tail of the global queue, emptying *batch.
//
// sched.lock must be held.
func globrunqputbatch(batch *gQueue, n int32) {
	assertLockHeld(&sched.lock)

	sched.runq.pushBackAll(*batch)
	sched.runqsize += n
	*batch = gQueue{}
}
6365
6366
6367
// globrunqget takes a batch of Gs from the global runnable queue:
// it returns one G to run now and transfers the rest (up to max, and
// at most half of pp's local queue capacity) onto pp's local queue.
// Returns nil if the global queue is empty.
//
// sched.lock must be held.
func globrunqget(pp *p, max int32) *g {
	assertLockHeld(&sched.lock)

	if sched.runqsize == 0 {
		return nil
	}

	// Take a fair share (1/gomaxprocs, rounded up), clamped by the
	// queue size, the caller's max, and half the local queue length.
	n := sched.runqsize/gomaxprocs + 1
	if n > sched.runqsize {
		n = sched.runqsize
	}
	if max > 0 && n > max {
		n = max
	}
	if n > int32(len(pp.runq))/2 {
		n = int32(len(pp.runq)) / 2
	}

	sched.runqsize -= n

	// First popped G is returned directly; the rest go to pp's queue.
	gp := sched.runq.pop()
	n--
	for ; n > 0; n-- {
		gp1 := sched.runq.pop()
		runqput(pp, gp1, false)
	}
	return gp
}
6396
6397
// pMask is an atomic bitmask with one bit per P, used to publish
// per-P boolean state (idle / has-timers) to other Ms.
type pMask []uint32

// read reports whether P id's bit is set.
func (p pMask) read(id uint32) bool {
	word := id / 32
	mask := uint32(1) << (id % 32)
	return (atomic.Load(&p[word]) & mask) != 0
}

// set atomically sets P id's bit.
func (p pMask) set(id int32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	atomic.Or(&p[word], mask)
}

// clear atomically clears P id's bit.
func (p pMask) clear(id int32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	atomic.And(&p[word], ^mask)
}
6420
6421
6422
6423
6424
6425
6426
6427
6428
6429
6430
6431
// pidleput puts pp on the _Pidle list. now must be a relatively
// recent nanotime() reading, or zero to read the clock here; the
// value used is returned. This releases ownership of pp: once
// sched.lock is dropped it is no longer safe to use pp.
//
// sched.lock must be held.
func pidleput(pp *p, now int64) int64 {
	assertLockHeld(&sched.lock)

	if !runqempty(pp) {
		throw("pidleput: P has non-empty run queue")
	}
	if now == 0 {
		now = nanotime()
	}
	// Publish timer/idle state so other Ms can find (or skip) this P.
	if pp.timers.len.Load() == 0 {
		timerpMask.clear(pp.id)
	}
	idlepMask.set(pp.id)
	pp.link = sched.pidle
	sched.pidle.set(pp)
	sched.npidle.Add(1)
	// Account idle time for the GC CPU limiter.
	if !pp.limiterEvent.start(limiterEventIdle, now) {
		throw("must be able to track idle limiter event")
	}
	return now
}
6453
6454
6455
6456
6457
6458
6459
6460
// pidleget tries to take a P off the _Pidle list, acquiring ownership
// of it. now must be a relatively recent nanotime() reading, or zero;
// the value used is returned alongside the P (nil if none idle).
//
// sched.lock must be held.
func pidleget(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp := sched.pidle.ptr()
	if pp != nil {
		// Timer mask set conservatively: the P may or may not have
		// timers, but it can now acquire some.
		if now == 0 {
			now = nanotime()
		}
		timerpMask.set(pp.id)
		idlepMask.clear(pp.id)
		sched.pidle = pp.link
		sched.npidle.Add(-1)
		pp.limiterEvent.stop(limiterEventIdle, now)
	}
	return pp, now
}
6478
6479
6480
6481
6482
6483
6484
6485
6486
6487
6488
// pidlegetSpinning tries to take a P off the _Pidle list on behalf of
// a spinning M (or a caller that needs one) that has found work. If
// no P is available, it sets sched.needspinning so Ms about to drop
// their P re-check for the pending work first.
//
// sched.lock must be held.
func pidlegetSpinning(now int64) (*p, int64) {
	assertLockHeld(&sched.lock)

	pp, now := pidleget(now)
	if pp == nil {
		// No P available: signal Ms releasing a P to check for the
		// work we found before idling.
		sched.needspinning.Store(1)
		return nil, now
	}

	return pp, now
}
6503
6504
6505
// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.
func runqempty(pp *p) bool {
	// Defend against the race: 1) pp has a G in runnext and
	// runqhead == runqtail, 2) runqput kicks the G from runnext into
	// runq, 3) runqget empties runnext. Observing head == tail and
	// then runnext == 0 separately could falsely report empty, so
	// retry until tail is stable across the reads.
	for {
		head := atomic.Load(&pp.runqhead)
		tail := atomic.Load(&pp.runqtail)
		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
		if tail == atomic.Load(&pp.runqtail) {
			return head == tail && runnext == 0
		}
	}
}
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
// randomizeScheduler perturbs scheduling order under the race
// detector to shake out order-dependent code.
const randomizeScheduler = raceenabled
6531
6532
6533
6534
6535
6536
// runqput tries to put gp on the local runnable queue.
// If next is false, it adds gp to the tail of the queue.
// If next is true, it puts gp in the pp.runnext slot.
// If the run queue is full, the G goes to the global queue.
// Executed only by the owner P.
func runqput(pp *p, gp *g, next bool) {
	if !haveSysmon && next {
		// A runnext goroutine shares the current goroutine's time
		// slice (inheritTime in runqget). Normally sysmon preempts
		// a ping-pong pair of goroutines that would otherwise starve
		// everyone else; without sysmon, avoid runnext entirely.
		next = false
	}
	// Under the race detector, randomly demote runnext to shake out
	// order-dependent code.
	if randomizeScheduler && next && randn(2) == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := pp.runnext
		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the displaced runnext G out to the regular run queue.
		gp = oldnext.ptr()
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	t := pp.runqtail
	if t-h < uint32(len(pp.runq)) {
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item visible to consumers
		return
	}
	// Queue full: move half of it plus gp to the global queue.
	if runqputslow(pp, gp, h, t) {
		return
	}
	// The queue is no longer full (slow path lost a race); the put
	// above must now succeed.
	goto retry
}
6580
6581
6582
// runqputslow moves gp plus half of pp's local run queue onto the
// global queue. Called by runqput when the local queue is full.
// Executed only by the owner P. Reports false if it lost a race
// (someone consumed from the queue) and the caller should retry.
func runqputslow(pp *p, gp *g, h, t uint32) bool {
	var batch [len(pp.runq)/2 + 1]*g

	// First, grab half the local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(pp.runq)/2) {
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
	}
	// cas-release: commits our consumption of the n items.
	if !atomic.CasRel(&pp.runqhead, h, h+n) {
		return false
	}
	batch[n] = gp

	// Under the race detector, shuffle the batch (Fisher-Yates).
	if randomizeScheduler {
		for i := uint32(1); i <= n; i++ {
			j := cheaprandn(i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines into a queue via schedlink.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}
	var q gQueue
	q.head.set(batch[0])
	q.tail.set(batch[n])

	// Now put the batch on the global queue.
	lock(&sched.lock)
	globrunqputbatch(&q, int32(n+1))
	unlock(&sched.lock)
	return true
}
6621
6622
6623
6624
6625
// runqputbatch tries to put all the Gs on q onto the local runnable
// queue; any overflow goes to the global queue (briefly taking
// sched.lock). qsize is the number of Gs in q.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue, qsize int) {
	h := atomic.LoadAcq(&pp.runqhead)
	t := pp.runqtail
	n := uint32(0)
	// Fill the local ring until q is drained or the ring is full.
	for !q.empty() && t-h < uint32(len(pp.runq)) {
		gp := q.pop()
		pp.runq[t%uint32(len(pp.runq))].set(gp)
		t++
		n++
	}
	qsize -= int(n)

	// Under the race detector, shuffle the newly added entries
	// (still unpublished, since runqtail hasn't been advanced yet).
	if randomizeScheduler {
		off := func(o uint32) uint32 {
			return (pp.runqtail + o) % uint32(len(pp.runq))
		}
		for i := uint32(1); i < n; i++ {
			j := cheaprandn(i + 1)
			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
		}
	}

	// store-release: publish the new entries to consumers.
	atomic.StoreRel(&pp.runqtail, t)
	// Overflow goes to the global queue.
	if !q.empty() {
		lock(&sched.lock)
		globrunqputbatch(q, int32(qsize))
		unlock(&sched.lock)
	}
}
6655
6656
6657
6658
6659
// runqget gets a G from the local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice (it came from runnext); otherwise it starts a
// fresh slice. Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	next := pp.runnext
	// If runnext is non-0 and the CAS fails it can only have been
	// stolen by another P (other Ps can only replace it with 0), so
	// there is no need to retry the CAS.
	if next != 0 && pp.runnext.cas(next, 0) {
		return next.ptr(), true
	}

	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := pp.runqtail
		if t == h {
			return nil, false
		}
		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}
6682
6683
6684
// runqdrain removes and returns all goroutines from pp's local
// runnable queue (including runnext), with their count.
// Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue, n uint32) {
	// Grab runnext first, tolerating a lost CAS race (stolen by
	// another P).
	oldNext := pp.runnext
	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
		drainQ.pushBack(oldNext.ptr())
		n++
	}

retry:
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
	t := pp.runqtail
	qn := t - h
	if qn == 0 {
		return
	}
	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
		goto retry
	}

	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
		goto retry
	}

	// The head pointer is advanced (above) before the Gs are copied
	// out, not after: we must own the Gs fully before touching their
	// schedlink (pushBack mutates it), otherwise a concurrent
	// runqsteal could race on those fields. Advancing the head first
	// means other Ps can no longer reach these slots.
	for i := uint32(0); i < qn; i++ {
		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
		drainQ.pushBack(gp)
		n++
	}
	return
}
6721
6722
6723
6724
6725
// runqgrab grabs a batch of goroutines (half of pp's queue) from pp's
// runnable queue into batch, a ring buffer starting at batchHead.
// Returns the number of goroutines grabbed. Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		n = n - n/2 // take the larger half
		if n == 0 {
			if stealRunNextG {
				// Queue empty; try to steal pp.runnext instead.
				if next := pp.runnext; next != 0 {
					if pp.status == _Prunning {
						// Back off briefly so pp gets a chance to
						// schedule its runnext itself. The key case:
						// the G on pp ready()s another G and then
						// almost immediately blocks — stealing
						// runnext in that window thrashes Gs between
						// Ps. A sync chan send/recv takes ~50ns, so
						// 3us gives ~50x headroom.
						if !osHasLowResTimer {
							usleep(3)
						} else {
							// Some platforms have 1-15ms timer
							// granularity — far too coarse for this
							// backoff — so just yield.
							osyield()
						}
					}
					if !pp.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := pp.runq[(h+i)%uint32(len(pp.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}
6777
6778
6779
6780
// runqsteal steals half of the elements from the local runnable queue
// of p2 and puts them onto the local runnable queue of pp.
// Returns one of the stolen elements, or nil on failure.
func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
	t := pp.runqtail
	// Grab directly into pp's ring, starting at its current tail.
	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	// Return the last grabbed G directly; leave the rest queued.
	n--
	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
	if n == 0 {
		return gp
	}
	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(pp.runq)) {
		throw("runqsteal: runq overflow")
	}
	// store-release: publish the stolen Gs to consumers.
	atomic.StoreRel(&pp.runqtail, t+n)
	return gp
}
6799
6800
6801
// A gQueue is a queue of Gs linked through g.schedlink, supporting
// insertion at both ends and removal from the head.
type gQueue struct {
	head guintptr
	tail guintptr
}

// empty reports whether q contains no Gs.
func (q *gQueue) empty() bool {
	return q.head == 0
}

// push adds gp to the head of q.
func (q *gQueue) push(gp *g) {
	gp.schedlink = q.head
	q.head.set(gp)
	if q.tail == 0 {
		q.tail.set(gp)
	}
}

// pushBack adds gp to the tail of q.
func (q *gQueue) pushBack(gp *g) {
	gp.schedlink = 0
	if q.tail != 0 {
		q.tail.ptr().schedlink.set(gp)
	} else {
		q.head.set(gp)
	}
	q.tail.set(gp)
}

// pushBackAll appends all Gs in q2 to the tail of q. q2 must not be
// used afterward.
func (q *gQueue) pushBackAll(q2 gQueue) {
	if q2.tail == 0 {
		return
	}
	q2.tail.ptr().schedlink = 0
	if q.tail != 0 {
		q.tail.ptr().schedlink = q2.head
	} else {
		q.head = q2.head
	}
	q.tail = q2.tail
}

// pop removes and returns the head of q, or nil if q is empty.
func (q *gQueue) pop() *g {
	gp := q.head.ptr()
	if gp != nil {
		q.head = gp.schedlink
		if q.head == 0 {
			q.tail = 0
		}
	}
	return gp
}

// popList takes all Gs in q, emptying it, and returns them as a gList.
func (q *gQueue) popList() gList {
	stack := gList{q.head}
	*q = gQueue{}
	return stack
}
6866
6867
6868
// A gList is a singly-linked stack of Gs linked through g.schedlink.
type gList struct {
	head guintptr
}

// empty reports whether l contains no Gs.
func (l *gList) empty() bool {
	return l.head == 0
}

// push adds gp to the head of l.
func (l *gList) push(gp *g) {
	gp.schedlink = l.head
	l.head.set(gp)
}

// pushAll prepends all Gs in q to l. q's links are reused, so q must
// not be used afterward.
func (l *gList) pushAll(q gQueue) {
	if !q.empty() {
		q.tail.ptr().schedlink = l.head
		l.head = q.head
	}
}

// pop removes and returns the head of l, or nil if l is empty.
func (l *gList) pop() *g {
	gp := l.head.ptr()
	if gp != nil {
		l.head = gp.schedlink
	}
	return gp
}
6900
6901
// setMaxThreads installs a new OS-thread limit (sched.maxmcount) and
// returns the previous one, clamping values above MaxInt32 since the
// counter is an int32. checkmcount throws if the limit is already
// exceeded.
func setMaxThreads(in int) (out int) {
	lock(&sched.lock)
	out = int(sched.maxmcount)
	if in > 0x7fffffff { // MaxInt32
		sched.maxmcount = 0x7fffffff
	} else {
		sched.maxmcount = int32(in)
	}
	checkmcount()
	unlock(&sched.lock)
	return
}
6914
6915
// procPin pins the current goroutine to its M by incrementing
// m.locks (which disables preemption) and returns the current P's id.
// (Upstream marks these nosplit; the directives are stripped in this
// view — confirm before relying on that.)
func procPin() int {
	gp := getg()
	mp := gp.m

	mp.locks++
	return int(mp.p.ptr().id)
}

// procUnpin undoes procPin by decrementing m.locks, re-enabling
// preemption when it reaches zero.
func procUnpin() {
	gp := getg()
	gp.m.locks--
}
6929
6930
6931
// The functions below expose procPin/procUnpin to other packages.
// In the original source each is preceded by a go:linkname directive
// (e.g. to sync.runtime_procPin) and go:nosplit; those directive
// comments were stripped from this view — restore from upstream
// before building.

// sync_runtime_procPin implements sync.runtime_procPin.
func sync_runtime_procPin() int {
	return procPin()
}

// sync_runtime_procUnpin implements sync.runtime_procUnpin.
func sync_runtime_procUnpin() {
	procUnpin()
}

// sync_atomic_runtime_procPin implements sync/atomic.runtime_procPin.
func sync_atomic_runtime_procPin() int {
	return procPin()
}

// sync_atomic_runtime_procUnpin implements sync/atomic.runtime_procUnpin.
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}

// internal_weak_runtime_procPin implements internal/weak's procPin hook.
func internal_weak_runtime_procPin() int {
	return procPin()
}

// internal_weak_runtime_procUnpin implements internal/weak's procUnpin hook.
func internal_weak_runtime_procUnpin() {
	procUnpin()
}
6965
6966
6967
6968
6969
// sync_runtime_canSpin reports whether active spinning makes sense
// for sync.Mutex at the moment. sync.Mutex is cooperative, so be
// conservative: spin only a few times (i < active_spin), only on a
// multicore machine with GOMAXPROCS > 1, at least one other running
// P, and an empty local run queue. Unlike the runtime mutex, no
// passive spinning: there may be work on the global run queue or on
// other Ps.
func sync_runtime_canSpin(i int) bool {
	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}
6984
6985
6986
// sync_runtime_doSpin performs active spinning on behalf of
// sync.Mutex by executing active_spin_cnt iterations of procyield.
func sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
6990
// stealOrder determines the order in which Ps are probed during work
// stealing (see stealOrder.reset in procresize).
var stealOrder randomOrder

// randomOrder/randomEnum enumerate all Ps in different pseudo-random
// orders without repetition: if inc is coprime with count, then the
// sequence (pos + inc) % count visits every position exactly once.
type randomOrder struct {
	count    uint32
	coprimes []uint32 // values in [1, count] coprime with count
}

// randomEnum is one enumeration over [0, count).
type randomEnum struct {
	i     uint32 // number of positions visited so far
	count uint32
	pos   uint32 // current position
	inc   uint32 // step, coprime with count
}
7008
// reset prepares ord for enumerating count elements by precomputing
// every value in [1, count] that is coprime with count.
func (ord *randomOrder) reset(count uint32) {
	ord.count = count
	ord.coprimes = ord.coprimes[:0]
	for i := uint32(1); i <= count; i++ {
		if gcd(i, count) == 1 {
			ord.coprimes = append(ord.coprimes, i)
		}
	}
}
7018
// start returns an enumerator over ord.count positions, deriving both
// the starting position and the (coprime) increment from seed i.
func (ord *randomOrder) start(i uint32) randomEnum {
	return randomEnum{
		count: ord.count,
		pos:   i % ord.count,
		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
	}
}

// done reports whether every position has been visited.
func (enum *randomEnum) done() bool {
	return enum.i == enum.count
}

// next advances to the next position in the pseudo-random order.
func (enum *randomEnum) next() {
	enum.i++
	enum.pos = (enum.pos + enum.inc) % enum.count
}

// position returns the current position.
func (enum *randomEnum) position() uint32 {
	return enum.pos
}
7039
// gcd returns the greatest common divisor of a and b, computed with
// the iterative Euclidean algorithm. gcd(x, 0) == gcd(0, x) == x.
func gcd(a, b uint32) uint32 {
	for b != 0 {
		r := a % b
		a = b
		b = r
	}
	return a
}
7046
7047
7048
// An initTask represents the set of init functions to run for a
// package. The function pointers themselves are laid out in memory
// immediately after this struct (doInit1 reads them starting at
// offset 8, one uintptr-sized entry per function).
type initTask struct {
	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done (managed by doInit1)
	nfns  uint32 // number of init functions following the struct
	// nfns function pointers follow in memory.
}

// inittrace holds statistics gathered while running init functions,
// active when GODEBUG=inittrace=1 (see main and doInit1).
var inittrace tracestat

type tracestat struct {
	active bool   // tracing enabled
	id     uint64 // goroutine id running the inits
	allocs uint64 // allocation count counter (deltas printed by doInit1)
	bytes  uint64 // allocated-bytes counter (deltas printed by doInit1)
}
7065
7066 func doInit(ts []*initTask) {
7067 for _, t := range ts {
7068 doInit1(t)
7069 }
7070 }
7071
// doInit1 runs the init functions recorded in t exactly once.
// t.state tracks progress (0 = not started, 1 = in progress,
// 2 = done); observing 1 here means an initialization cycle the
// linker should have rejected. With GODEBUG=inittrace=1, timing and
// allocation statistics are printed per package.
func doInit1(t *initTask) {
	switch t.state {
	case 2: // fully initialized
		return
	case 1: // initialization in progress
		throw("recursive call during initialization - linker skew")
	default: // not initialized yet
		t.state = 1 // initialization in progress

		var (
			start  int64
			before tracestat
		)

		if inittrace.active {
			start = nanotime()
			// Non-atomic load is fine: inittrace is only updated by
			// this init goroutine.
			before = inittrace
		}

		if t.nfns == 0 {
			// The linker should have pruned empty init tasks.
			throw("inittask with no functions")
		}

		// The function pointers are stored directly after the
		// initTask header (state + nfns = 8 bytes).
		firstFunc := add(unsafe.Pointer(t), 8)
		for i := uint32(0); i < t.nfns; i++ {
			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
			f := *(*func())(unsafe.Pointer(&p))
			f()
		}

		if inittrace.active {
			end := nanotime()
			// Non-atomic load: see comment on 'before' above.
			after := inittrace

			// Recover the package path from the first init function.
			f := *(*func())(unsafe.Pointer(&firstFunc))
			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))

			var sbuf [24]byte
			print("init ", pkg, " @")
			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
			print("\n")
		}

		t.state = 2 // initialization done
	}
}
7124
// (Removed stray "View as plain text" web-viewer artifact left over from scraping.)