Source file
src/runtime/stack_test.go
package runtime_test

import (
	"fmt"
	"internal/asan"
	"internal/testenv"
	"reflect"
	"regexp"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe"
)
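// TestStackMem runs batches of goroutines that recurse with a large
// stack-allocated array, then uses MemStats to check that the stack memory
// consumed (growth in StackSys, and StackInuse) stays within a generous bound.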
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// Let the goroutines finish and their stacks be released
		// before starting the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth)
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}

	// StackInuse can go down as goroutines exit, so compare as signed values.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}
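// TestStackGrowth grows a goroutine stack in several contexts: in an
// ordinary goroutine, in a goroutine locked to its OS thread, and in a
// finalizer, with a watchdog tied to the test deadline to report a hang.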
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	var wg sync.WaitGroup

	// Grow the stack in a normal goroutine.
	var growDuration time.Duration
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()
	t.Log("first growStack took", growDuration)

	// Grow the stack in a goroutine locked to its OS thread.
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// Grow the stack inside a finalizer.
	var finalizerStart time.Time
	var started atomic.Bool
	var progress atomic.Uint32
	wg.Add(1)
	s := new(string)
	SetFinalizer(s, func(ss *string) {
		defer wg.Done()
		finalizerStart = time.Now()
		started.Store(true)
		growStack(&progress)
	})
	setFinalizerTime := time.Now()
	s = nil

	if d, ok := t.Deadline(); ok {
		// Arm a watchdog slightly inside the test deadline.
		timeout := time.Until(d) * 19 / 20
		timer := time.AfterFunc(timeout, func() {
			// Panic (rather than calling t.Error) so that a stuck
			// finalizer produces a useful goroutine dump.
			if !started.Load() {
				panic("finalizer did not start")
			} else {
				panic(fmt.Sprintf("finalizer started %s ago (%s after registration) and ran %d iterations, but did not return", time.Since(finalizerStart), finalizerStart.Sub(setFinalizerTime), progress.Load()))
			}
		})
		defer timer.Stop()
	}

	GC()
	wg.Wait()
	t.Logf("finalizer started after %s and ran %d iterations in %v", finalizerStart.Sub(setFinalizerTime), progress.Load(), time.Since(finalizerStart))
}
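// growStack calls growStackIter with increasing recursion depth, checking
// for stack corruption after each call. If progress is non-nil it records
// the current iteration so a watchdog can report how far the run got.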
func growStack(progress *atomic.Uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			progress.Store(uint32(i))
		}
	}
	GC()
}
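// growStackIter recurses n frames deep, writing through p and checking the
// value on the way back up so that a pointer mis-adjusted during a stack
// copy is detected.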
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// Grow the stack during a channel operation.
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// Grow the stack during a map operation.
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// Grow the stack while creating a goroutine.
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}
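// TestDeferPtrs checks that a pointer into the stack held by a deferred
// call (&y passed to set) is adjusted correctly when the stack is copied
// during growth.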
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte
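// TestDeferPtrsGoexit is like TestDeferPtrs, but runs the deferred calls
// via Goexit at varying stack depths, using a large argument (bigBuf) so
// that running the defer itself may force the stack to grow.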
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

func setBig(p *int, x int, b bigBuf) {
	*p = x
}
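// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but uses panic rather
// than Goexit to run the deferred calls, exercising a different path
// through the runtime.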
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}
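// testDeferLeafSigpanic1 must remain a separate, non-inlined leaf function
// so that the nil dereference below faults in its own frame.
//
//go:noinline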
func testDeferLeafSigpanic1() {
	// Fault in this frame so the runtime injects a sigpanic call while
	// this leaf function is on the stack.
	*(*int)(nil) = 0
}
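// TestDeferLeafSigpanic checks that the deferred function set up below is
// matched to the correct frame when a leaf function faults and the runtime
// injects a sigpanic call; the recovering defer then runs GC to force a
// stack walk.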
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that recovers and then walks the stack via GC.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()

	// Fault in a leaf function so the defer above must be matched across
	// the injected sigpanic frame.
	testDeferLeafSigpanic1()
}
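// TestPanicUseStack panics and, from the recovering defers, grows the
// stack and records tracebacks while raising further nested panics,
// checking that the panic and defer records on the stack survive the
// resulting stack copies.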
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc)
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc)
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc)
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point the panic below has been recovered and the large
		// stack unwound. Force a stack walk to check that it is still
		// consistent.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Allocate enough to trigger garbage collections while this
		// goroutine sits deep in the stack.
		xtree = makeTree(18)
		// Pause briefly before panicking from deep in the stack.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}
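// useStackAndCall consumes roughly n KB of stack and then calls f.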
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}
func TestStackCache(t *testing.T) {
	// Run many goroutines through a range of stack sizes, several rounds
	// in a row, to exercise the stack segment caches.
	const (
		R = 4
		G = 200
		S = 5
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Grow the stack, then panic and make sure the deferred recover still
	// sees the panic value after any stack copies.
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}
func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

type stkobjT struct {
	p *stkobjT
	x int64
	y [20]int
}

func Sum(n int64, p *stkobjT) {
	if n == 0 {
		return
	}
	s := stkobjT{p: p, x: n}
	Sum(n-1, &s)
	p.x += s.x
}

func BenchmarkStackCopyWithStkobj(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			var s stkobjT
			Sum(100000, &s)
			c <- true
		}()
		<-c
	}
}
func BenchmarkIssue18138(b *testing.B) {
	// A channel holding N "may run a goroutine" tokens.
	const N = 10
	c := make(chan []byte, N)
	for i := 0; i < N; i++ {
		c <- make([]byte, 1)
	}

	for i := 0; i < b.N; i++ {
		<-c
		go func() {
			useStackPtrs(1000, false)
			m := make([]byte, 8192)
			c <- m
		}()
	}
}

func useStackPtrs(n int, b bool) {
	if b {
		// This branch never runs (b is always false), but it adds a
		// pointer-typed array to the frame, so stack copying has a larger
		// frame to scan and adjust.
		var a [128]*int
		a[n] = &n
		n = *a[0]
	}
	if n == 0 {
		return
	}
	useStackPtrs(n-1, b)
}
type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

func (s structWithMethod) inlinablePanic() { panic("panic") }
func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}
func TestStackWrapperStackInlinePanic(t *testing.T) {
	// Test traceback behavior when a panic originates in a function that
	// is inlined into a compiler-generated wrapper: the wrapper must not
	// show up as <autogenerated> and the inlined frame must still appear.
	var d structWithMethod
	wrapper := (*structWithMethod).inlinablePanic
	defer func() {
		err := recover()
		if err == nil {
			t.Fatalf("expected panic")
		}
		buf := make([]byte, 4<<10)
		stk := string(buf[:Stack(buf, false)])
		if strings.Contains(stk, "<autogenerated>") {
			t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
		}
		// Check that inlining actually happened (it is disabled when
		// optimizations are off).
		if !testenv.OptimizationOff() {
			if !strings.Contains(stk, "inlinablePanic(...)") {
				t.Fatalf("inlinablePanic not inlined")
			}
		}
	}()
	wrapper(&d)
}
type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// Calling a method on a nil interface value sigpanics inside the wrapper.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Calling a value-receiver method through a nil pointer panics in
		// the generated panicwrap code.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}
func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// A panicking wrapper must appear in the traceback, even though
	// non-panicking wrapper frames are normally elided.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}
func TestCallersFromWrapper(t *testing.T) {
	// When the stack passed to CallersFrames begins at an autogenerated
	// wrapper, the wrapper frame itself must be reported rather than
	// elided.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}
func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Capture a traceback that crosses nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]

	// The traceback should contain all five TracebackSystemstack frames
	// plus this test function.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb strings.Builder
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}
func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check that the expected functions appear in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}
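// TestDeferLiveness runs the DeferLiveness test program with
// GODEBUG=clobberfree=1, which clobbers freed objects, and expects no
// output: values reachable only from a deferred call must not be freed
// prematurely.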
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // goroutines
	N := 10000 // iterations per goroutine
	D := 200   // recursion depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}
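// deferHeapAndStack(n) computes 2*n, queueing defers in a loop (heap
// allocated) at even depths and straight-line defers (stack allocated) at
// odd depths.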
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// Defers in a loop are heap allocated.
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// Straight-line defers can live on the stack.
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte))
	return
}

// escapeMe is called through a variable so the compiler cannot prove its
// argument does not escape; the new([1024]byte) above is therefore heap
// allocated.
var escapeMe = func(x any) {}
func TestFramePointerAdjust(t *testing.T) {
	switch GOARCH {
	case "amd64", "arm64":
	default:
		t.Skipf("frame pointer is not supported on %s", GOARCH)
	}
	if asan.Enabled {
		t.Skip("skipping test: ASAN forces heap allocation")
	}
	output := runTestProg(t, "testprog", "FramePointerAdjust")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}
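// TestSystemstackFramePointerAdjust checks that saved frame pointers on a
// goroutine stack remain correct after the stack is grown and then shrunk
// again; see growAndShrinkStack below.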
func TestSystemstackFramePointerAdjust(t *testing.T) {
	growAndShrinkStack(512, [1024]byte{})
}
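// growAndShrinkStack recurses n frames deep, carrying stackBallast to make
// each frame large, and triggers a stack shrink with frame pointer
// verification at every level on the way back up.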
func growAndShrinkStack(n int, stackBallast [1024]byte) {
	if n <= 0 {
		return
	}
	growAndShrinkStack(n-1, stackBallast)
	ShrinkStackAndVerifyFramePointers()
}