Source file: src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/cpu"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

const (
	// stackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and iOS because they do not use a separate stack.
	stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024

	// The minimum size of stack used by Go code.
	stackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds stackMin + stackSystem up to a power of 2.
	fixedStack0 = stackMin + stackSystem
	fixedStack1 = fixedStack0 - 1
	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
	fixedStack  = fixedStack6 + 1

	// stackNosplit is the maximum number of bytes that a chain of
	// NOSPLIT functions can use.
	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for a stackNosplit chain of
	// NOSPLIT calls plus one stackSmall frame plus stackSystem
	// bytes for the OS.
	stackGuard = stackNosplit + stackSystem + abi.StackSmall
)
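
// The fixedStack ladder above is the standard bit-smearing trick for rounding
// up to a power of 2. A minimal standalone sketch (not part of the runtime),
// using v = 2048+512 = 2560 as Plan 9's stackMin+stackSystem would produce:
//
//	v := uintptr(2560) // 0b101000000000
//	v--                // 2559: 0b100111111111
//	v |= v >> 1        // smear the highest set bit rightward...
//	v |= v >> 2
//	v |= v >> 4
//	v |= v >> 8
//	v |= v >> 16       // ...until all lower bits are set: 4095
//	v++                // 4096, the next power of 2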

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

var (
	stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
)

const (
	uintptrMask = 1<<(8*goarch.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275

	// stackPoisonMin is the lowest allowed stack poison value.
	stackPoisonMin = uintptrMask & -4096
)

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//
//	order = log_2(size/fixedStack)
//
// There is a free list for each order.
// Each entry is padded out to a cache-line multiple to avoid false sharing.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
}

type stackpoolItem struct {
	_    sys.NotInHeap
	mu   mutex
	span mSpanList
}
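
// The anonymous pad field in stackpool rounds each element up to a cache-line
// multiple so the per-order locks don't false-share. A minimal sketch of the
// same padding arithmetic outside the runtime (the sizes are illustrative
// assumptions, not the real ones):
//
//	const lineSize = 64                  // assumed cache line size
//	type item struct{ mu, span uintptr } // stand-in for stackpoolItem
//	var _ [(lineSize - unsafe.Sizeof(item{})%lineSize) % lineSize]byte
//
// The outer %lineSize makes the pad zero, rather than a full lineSize, when
// the item size is already a multiple of the line size.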

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&pageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
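
// For instance (a sanity check, not runtime code): stacklog2(1) == 0,
// stacklog2(4) == 2, and stacklog2(5) == 2. The loop shifts n right until it
// reaches 1, counting the shifts, so it computes the floor of log_2(n),
// not the ceiling.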

// Allocates a stack from the free pool. Must be called with
// stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = fixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			if valgrindenabled {
				// Register the pointer-sized free-list link word
				// with valgrind as an allocation, so that the
				// write to it below is not reported as a write
				// to unaddressable memory.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= fixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
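
// Together, refill and release keep each per-P cache oscillating around
// _StackCacheSize/2: a refill tops an empty cache up past half capacity, and
// a release drains an over-full cache back down to it. A toy statement of the
// invariant (the capacity is an illustrative assumption, not the real
// constant):
//
//	capacity := 32768      // assumed _StackCacheSize
//	target := capacity / 2 // both paths converge here
//	// after stackcacherefill:  size >= target (loop stops at the first value past the mark)
//	// after stackcacherelease: size <= target (likewise, from above)
//
// Converging on the midpoint rather than a boundary prevents thrashing when a
// goroutine repeatedly allocates and frees a single stack.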

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys, "goroutine stack (system)")
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		if valgrindenabled {
			// The free-list link word was registered with valgrind
			// as its own allocation; release that registration
			// before the memory is handed out as a stack.
			valgrindFree(unsafe.Pointer(x.ptr()))
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> gc.PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
			traceRelease(trace)
		}
	}
	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if asanenabled {
		asanunpoison(v, uintptr(n))
	}
	if valgrindenabled {
		valgrindMalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
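
// A worked example of the order computation above, assuming fixedStack ==
// 2048 as on most platforms: for n = 8192, n2 starts at 8192 and is halved
// twice before n2 > fixedStack fails, so order == 2. The stack then comes
// from stackpool[2], whose spans carve out 2048<<2 = 8192-byte elements.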

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if traceAllocFreeEnabled() {
		trace := traceAcquire()
		if trace.ok() {
			trace.GoroutineStackFree(uintptr(v))
			traceRelease(trace)
		}
	}
	if msanenabled {
		msanfree(v, n)
	}
	if asanenabled {
		asanpoison(v, n)
	}
	if valgrindenabled {
		valgrindFree(v)
	}
	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > fixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			if valgrindenabled {
				// Re-register the free-list link word with
				// valgrind before stackpoolfree writes to it.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			if valgrindenabled {
				// Re-register the free-list link word with
				// valgrind before threading the stack back onto
				// the per-P cache's free list.
				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr()))
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a stack
			// span to the heap yet (see the comment in
			// stackpoolfree). Add it to the large stack cache
			// instead; freeStackSpans returns it to the heap
			// at the end of GC.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+
// |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
//
// varp > sp means that the function has a frame;
// varp == sp means frameless function.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if valgrindenabled {
		// p was read from a stack slot that, from valgrind's point
		// of view, may never have been initialized. Mark the local
		// copy as defined so that the range check below doesn't
		// trigger an uninitialized-value report.
		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p))
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
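
// In isolation, the adjustment is just a range test plus a constant offset.
// A standalone sketch with hypothetical values (not runtime types):
//
//	old := struct{ lo, hi uintptr }{0x1000, 0x2000}
//	delta := uintptr(0x7000) // new.hi - old.hi
//	p := uintptr(0x1a40)     // a word read from the old stack
//	if old.lo <= p && p < old.hi {
//		p += delta // now 0x8a40, the same slot in the new stack
//	}
//
// Pointers outside [old.lo, old.hi) are left alone: they point into the heap,
// globals, or another stack, none of which move during a stack copy.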

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-performance walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
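
// The indexing is plain little-endian bit order within each byte. For
// example, a sketch with a hypothetical two-byte bitmap:
//
//	data := []uint8{0b00000101, 0b00000010} // bits 0, 2, and 9 are set
//	bit := func(i uint) uint8 { return (data[i/8] >> (i % 8)) & 1 }
//	// bit(0) == 1, bit(1) == 0, bit(2) == 1, bit(9) == 1
//
// A set bit means the corresponding word of the frame holds a live pointer
// ("ptr" in ptrnames); a clear bit means it holds a scalar.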

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this region might contain pending channel receive slots
	// (anything below adjinfo.sghi), adjust pointers with CAS: a
	// concurrent channel send could be writing to those slots at
	// the same time. See syncadjustsudogs for how sghi is found.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.TrailingZeros8(b))
			b &= b - 1 // clear the lowest set bit
			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
	if frame.continpc == 0 {
		// Frame is dead.
		return
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		// On AMD64, this is the caller's frame pointer saved in this
		// frame.
		// On ARM64, this is the frame pointer of the caller's caller
		// saved by the caller in its frame.
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	locals, args, objs := frame.getStackMap(true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrBytes, gcData := obj.gcdata()
			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize {
				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
		}
	}
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	oldfp := gp.sched.bp
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
	if GOARCH == "arm64" {
		// On ARM64, the frame pointer is saved one word *below* the SP,
		// which is not copied or adjusted in any frame. Do it explicitly
		// here.
		if oldfp == gp.sched.sp-goarch.PtrSize {
			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
		}
	}
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu))
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			// There is a ranking cycle here between gscan bit and
			// hchan locks. Normally, we only allow acquiring hchan
			// locks and then getting a gscan bit. In this case, we
			// already have the gscan bit. We allow acquiring hchan
			// locks here as a special case, using the hchanLeaf
			// rank to assert that no further locks are acquired
			// while these are held, so the cycle cannot deadlock.
			lockWithRank(&sg.c.get().lock, lockRankHchanLeaf)
		}
		lastc = sg.c.get()
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks, so no concurrent
	// send/receive can mutate it mid-copy.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c.get() != lastc {
			unlock(&sg.c.get().lock)
		}
		lastc = sg.c.get()
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp
	// Add just the difference to gcController.addScannableStack.
	// g0 stacks never move, so this will never account for them.
	// It's also fine if we have no P, addScannableStack can deal with
	// that case.
	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		adjustframe(&u.frame, &adjinfo)
	}

	if valgrindenabled {
		if gp.valgrindStackID == 0 {
			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		} else {
			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi))
		}
	}

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
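
// For example (again just a sanity check): round2(1) == 1, round2(3) == 4,
// and round2(4) == 4. Unlike stacklog2 above, which floors, round2 rounds
// up. That is what gcComputeStartingStackSize below needs in order to keep
// startingStackSize a power of 2, as stackalloc requires.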

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entrance.
// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry()
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2 // we are not running on m stack.
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed by some unsynchronized
	// code as being inconsistent with the goroutine being suspended at a
	// safe point; canPreemptM below checks all of the conditions under
	// which preemption must be deferred and retried later.
	preempt := stackguard0 == stackPreempt
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + stackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
		// The call to morestack cost a word.
		sp -= goarch.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		// We're at a synchronous safe point now.
		gp.syncSafePoint = true

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + stackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack. The caller must hold the
// _Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	if gp.syscallsp != 0 {
		return false
	}

	// We also can't copy the stack if we're at an asynchronous
	// safe point because we don't have precise pointer maps for
	// all frames.
	if gp.asyncSafePoint {
		return false
	}

	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	if gp.parkingOnChan.Load() {
		return false
	}

	// We also can't copy the stack while the goroutine is in a
	// _Gwaiting state that it entered only to make itself
	// suspendable. A goroutine blocked with one of the "waiting
	// for suspendG" wait reasons is waiting in name only: it is
	// still executing (typically on the system stack) and will
	// resume using its user stack without another status change,
	// so that stack must not be moved out from under it.
	if readgstatus(gp)&^_Gscan == _Gwaiting && gp.waitreason.isWaitingForSuspendG() {
		return false
	}
	return true
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < fixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off       int32
	size      int32
	ptrBytes  int32
	gcdataoff uint32 // offset to gc data from moduledata.rodata
}

// gcdata returns the number of bytes that contain pointers, and
// a ptr/nonptr bitmask covering those bytes.
// Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.
func (r *stackObjectRecord) gcdata() (uintptr, *byte) {
	ptr := uintptr(unsafe.Pointer(r))
	var mod *moduledata
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		if datap.gofunc <= ptr && ptr < datap.end {
			mod = datap
			break
		}
	}
	// If you get a panic here due to a nil mod,
	// you may have made a copy of a stackObjectRecord.
	// You must use the original pointer.
	res := mod.rodata + uintptr(r.gcdataoff)
	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res))
}

// This is exported as ABI0 via linkname so obj can call it.
//
//go:nosplit
//go:linkname morestackc
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}

// startingStackSize is the amount of stack that new goroutines start with.
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
// startingStackSize is updated every GC by tracking the average size of
// stacks scanned during the GC.
var startingStackSize uint32 = fixedStack

func gcComputeStartingStackSize() {
	if debug.adaptivestackstart == 0 {
		return
	}
	// For details, see the design doc at
	// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit
	// The basic algorithm is to track the average size of stacks
	// scanned during the GC and start goroutines with stacks of
	// that average size. Starting at the average uses at most 2x
	// the space that an ideal algorithm would have used. Stacks
	// that are allocated too small can still grow, and stacks
	// allocated too large can still shrink.
	var scannedStackSize uint64
	var scannedStacks uint64
	for _, p := range allp {
		scannedStackSize += p.scannedStackSize
		scannedStacks += p.scannedStacks
		// Reset for the next GC cycle.
		p.scannedStackSize = 0
		p.scannedStacks = 0
	}
	if scannedStacks == 0 {
		startingStackSize = fixedStack
		return
	}
	avg := scannedStackSize/scannedStacks + stackGuard
	// Note: we add stackGuard to ensure that a goroutine that
	// uses the average space will not trigger a growth.
	if avg > uint64(maxstacksize) {
		avg = uint64(maxstacksize)
	}
	if avg < fixedStack {
		avg = fixedStack
	}
	// Note: maxstacksize fits in 30 bits, so avg also does.
	startingStackSize = uint32(round2(int32(avg)))
}