Source file
src/runtime/export_test.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goos"
	"internal/runtime/atomic"
	"internal/runtime/gc"
	"internal/runtime/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var ReadRandomFailed = &readRandomFailed

var Fastlog2 = fastlog2

var ParseByteCount = parseByteCount

var Nanotime = nanotime
var Cputicks = cputicks
var CyclesPerSecond = pprof_cyclesPerSecond
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var CgoCheckPointer = cgoCheckPointer

const CrashStackImplemented = crashStackImplemented

const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames

var LockPartialOrder = lockPartialOrder

type TimeTimer = timeTimer

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

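// LFStackPush, LFStackPop, and LFNodeValidate expose the runtime's
// lock-free stack (lfstack) to tests.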
func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)((*lfstack)(head).pop())
}
func LFNodeValidate(node *LFNode) {
	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
}

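// Netpoll runs one network poll with the given delay, on the system stack.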
func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func PointerMask(x any) (ret []byte) {
	systemstack(func() {
		ret = pointerMask(x)
	})
	return
}

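// RunSchedLocalQueueTest exercises a P's local run queue: for each queue
// length it checks that runqget returns the elements pushed by runqput
// and that the queue drains completely.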
func RunSchedLocalQueueTest() {
	pp := new(p)
	gs := make([]g, len(pp.runq))
	Escape(gs)
	for i := 0; i < len(pp.runq); i++ {
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(pp, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(pp); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(pp); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

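// RunSchedLocalQueueStealTest checks runqsteal: stealing from one P into
// another must move about half of the elements, and every element must be
// seen exactly once.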
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	Escape(gs)
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runqempty does not spuriously report the queue as empty
	// while a concurrent goroutine puts and gets elements. Spurious
	// emptiness affects scheduling decisions and can lead to
	// underutilization.
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	Escape(gs)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

const HashLoad = hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

const PtrSize = goarch.PtrSize

const ClobberdeadPtr = clobberdeadPtr

func Clobberfree() bool {
	return debug.clobberfree != 0
}

var ForceGCPeriod = &forcegcperiod

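// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises the
// "environment" traceback level, so later calls to debug.SetTraceback
// (e.g. from testing timeouts) can't lower it.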
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

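// CountPagesInUse returns the heap's pagesInUse counter alongside a direct
// count of pages in in-use spans, with the world stopped, so tests can
// check that the two agree.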
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stw := stopTheWorld(stwForTestCountPagesInUse)

	pagesInUse = mheap_.pagesInUse.Load()

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld(stw)

	return
}

func Fastrand() uint32          { return uint32(rand()) }
func Fastrand64() uint64        { return rand() }
func Fastrandn(n uint32) uint32 { return randn(n) }

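// ProfBuf exposes the runtime's profiling ring buffer (profBuf) so tests
// can drive writes, reads, and close directly.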
type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(mode)
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

type CPUStats = cpuStats

func ReadCPUStats() CPUStats {
	return work.cpuStats
}

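// ReadMetricsSlow is like readMetrics, but it stops the world, takes the
// metrics lock, and reads the samples both before and after flushing
// memstats, so tests can check the two accounting paths against each other.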
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stw := stopTheWorld(stwForTestReadMetricsSlow)

	// Initialize the metrics beforehand because this could
	// allocate and skew the stats.
	metricsLock()
	initMetrics()

	systemstack(func() {
		// Donate the racectx to g0. readMetricsLocked calls into the race
		// detector via map access.
		getg().racectx = getg().m.curg.racectx

		// Read the metrics once before in case it allocates and skews the
		// metrics.
		readMetricsLocked(samplesp, len, cap)

		// Read memstats first. It's going to flush
		// the mcaches which readMetrics does not do, so
		// going the other way around may result in
		// inconsistent statistics.
		readmemstats_m(memStats)

		// Read metrics again. We need to be sure we're on the
		// system stack with readmemstats_m so that we don't call into
		// the stack allocator and adjust metrics between there and here.
		readMetricsLocked(samplesp, len, cap)

		// Undo the donation.
		getg().racectx = 0
	})
	metricsUnlock()

	startTheWorld(stw)
}

var DoubleCheckReadMemStats = &doubleCheckReadMemStats

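// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap, for consistency checks.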
func ReadMemStatsSlow() (base, slow MemStats) {
	stw := stopTheWorld(stwForTestReadMemStatsSlow)

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [gc.NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if s.isUnusedUserArenaChunk() {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees, read out of the consistent heap stats.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free stats.
		var smallFree uint64
		for i := 0; i < gc.NumSizeClasses; i++ {
			slow.Frees += m.smallFreeCount[i]
			bySize[i].Frees += m.smallFreeCount[i]
			bySize[i].Mallocs += m.smallFreeCount[i]
			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
		}
		slow.Frees += m.tinyAllocCount + m.largeFreeCount
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		// Account for scavenged memory by walking each chunk's
		// scavenged bitmap.
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			// Add up the scavenged pages sitting in each P's page cache.
			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld(stw)
	return
}

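// ShrinkStackAndVerifyFramePointers shrinks the current goroutine's stack
// with stack-copy poisoning enabled, then walks the frame pointer chain to
// verify it is still intact after the move.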
func ShrinkStackAndVerifyFramePointers() {
	before := stackPoisonCopy
	defer func() { stackPoisonCopy = before }()
	stackPoisonCopy = 1

	gp := getg()
	systemstack(func() {
		shrinkstack(gp)
	})

	// If our new stack contains frame pointers into the old stack, this will
	// crash because the old stack has been poisoned.
	FPCallers(make([]uintptr, 1024))
}

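// BlockOnSystemStack switches to the system stack, prints "x" to stdout,
// and blocks in a stack containing "runtime.blockOnSystemStackInternal".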
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) Init() {
	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

func LockOSCounts() (external, internal uint32) {
	gp := getg()
	if gp.m.lockedExt+gp.m.lockedInt == 0 {
		if gp.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if gp.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return gp.m.lockedExt, gp.m.lockedInt
}

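// TracebackSystemstack captures a traceback that crosses i nested
// systemstack calls, using unwindJumpStack to jump between stacks.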
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
		var u unwinder
		u.initAt(pc, sp, 0, getg(), unwindJumpStack)
		return tracebackPCs(&u, 0, stk)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

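// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.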
func MapNextArenaHint() (start, end uintptr, ok bool) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
	ok = (addr == uintptr(got))
	if !ok {
		// We were unable to get the requested reservation.
		// Release what we did get and fail.
		sysFreeOS(got, physPageSize)
	}
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

type XRegPerG = xRegPerG

func Getg() *G {
	return getg()
}

func Goid() uint64 {
	return getg().goid
}

func GIsWaitingOnMutex(gp *G) bool {
	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
}

var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		g0 := getg()
		sp := sys.GetCallerSP()
		// The stack bounds for g0 stack is not always precise.
		// Use an artificially small stack, to trigger a small stack
		// overflow without breaking the host environment.
		g0.stack.lo = sp - 4096 - stackSystem
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		stackOverflow(nil)
	})
}

// stackOverflow recurses unboundedly, consuming stack frames until the
// artificially lowered g0 stack bound above is exceeded.
func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't occur.

	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

// Freegc exposes freegc, which eagerly frees a heap object, for testing.
func Freegc(p unsafe.Pointer, size uintptr, noscan bool) {
	freegc(p, size, noscan)
}

// AssistCredit returns the current g's GC assist credit, in bytes.
func AssistCredit() int64 {
	assistG := getg()
	if assistG.m.curg != nil {
		assistG = assistG.m.curg
	}
	return assistG.gcAssistBytes
}

// GcBlackenEnable reports whether the GC is currently expected to
// blacken objects (that is, whether a mark phase is running). The read
// is racy, so it's only meaningful when the caller knows the GC phase
// cannot change underneath it.
func GcBlackenEnable() bool {
	return gcBlackenEnabled != 0
}

const SizeSpecializedMallocEnabled = sizeSpecializedMallocEnabled

const RuntimeFreegcEnabled = runtimeFreegcEnabled

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

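// PallocSum exposes the packed page-allocator summary type for testing.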
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

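// PallocBits exposes the page-allocator bitmap for testing.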
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

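// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.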
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, most, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		most = max(most, run)
	}
	return PackPallocSum(start, most, end)
}

// FindBitRange64 exposes findBitRange64 for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// DiffPallocBits returns a list of BitRanges where the bits
// of a and b differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits takes the PallocBits and a BitRange to stringify,
// and returns the bits in that range rendered as a string of 0s and 1s.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

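// PallocData exposes pallocData for testing: the allocation and scavenge
// bitmaps for a single palloc chunk.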
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// FillAligned exposes fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// PageCache exposes the page allocator's per-P page cache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the mheap lock around the flush.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

// ChunkIdx is an exported chunkIdx for testing.
type ChunkIdx chunkIdx

// PageAlloc is an exported pageAlloc for testing. Note that because
// pageAlloc is not in the heap, neither is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the mheap lock around the allocator call.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the mheap lock around the allocator call.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the mheap lock around the allocator call.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		r = pp.scavenge(nbytes, nil, true)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// PallocData returns the PallocData for chunk index i, or nil if the
// chunk's backing data is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

// testSysStat is the sysStat passed to test versions of runtime
// structures, so that their memory is accounted consistently.
var testSysStat = &memstats.other_sys

// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// leaked.
//
// This AddrRanges is mutable, so we can test methods which
// mutate it.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(testSysStat)
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods which
// would mutate it will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// Methods that manipulate the backing store of addrRanges.ranges should
	// not be used on the result from this function (e.g. add) since they may
	// trigger reallocation. The new backing store could be allocated from
	// the heap, which is not what the test sysStat expects.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    testSysStat,
	}, false}
}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRange must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the chunks and scav maps. Each key is a chunk
// index; chunks lists the page ranges to mark as allocated in that
// chunk, and scav lists the page ranges to mark as scavenged.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), testSysStat, true)
	lockInit(p.mheapLock, lockRankMheap)
	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence in the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)

		// Clear all the scavenged bits which grow set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Simulate an allocation and subsequent free of all pages in
		// the chunk for the scavenge index. This brings the index to a
		// state equivalent to all pages within it being free.
		p.scav.index.alloc(ci, pallocChunkPages)
		p.scav.index.free(ci, 0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)

				// Make sure the scavenge index is updated.
				p.scav.index.alloc(ci, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	return (*PageAlloc)(p)
}

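// FreePageAlloc releases hard OS-level resources owned by the pageAlloc.
// Once this happens, the page allocator must not be used again.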
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
	}

	// Free the backing store for the scavenge index.
	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))

	// Subtract back out whatever we mapped for the summaries.
	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
	// (and in anger should actually be accounted for), and there's no other
	// way to figure out how much we actually mapped.
	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
	testSysStat.add(-int64(p.summaryMappedReady))

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
		}
	}
}

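// BaseChunkIdx is a convenient chunkIdx to use in tests. It points into
// the middle of the address space: the 0xc000* region on 64-bit
// platforms and 0x100* on 32-bit ones, shifted by arenaBaseOffset on AIX.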
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if goos.IsAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

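// PageBase returns an address given a chunk index and a page index
// relative to that chunk.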
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			cb := chunkBase(i)
			for j := 0; j < pallocChunkPages/64; j++ {
				// Run over each 64-bit bitmap section and ensure
				// scavenged is being cleared properly on allocation.
				// If a used bit and scavenged bit are both set, that's
				// an error, and could indicate a larger problem, or
				// an accounting problem.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: cb + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})

	if randomizeHeapBase && len(mismatches) > 0 {
		// When the heap base is randomized, the heap doesn't start at a
		// chunk boundary, and spurious mismatches can be reported for
		// pages below its start. If every recorded mismatch falls within
		// a single arena, treat the check as passing and clear the
		// recorded mismatches.
		affectedArenas := map[arenaIdx]bool{}
		for _, mismatch := range mismatches {
			if mismatch.Base > 0 {
				affectedArenas[arenaIndex(mismatch.Base)] = true
			}
		}
		if len(affectedArenas) == 1 {
			ok = true

			// Clear the mismatches we recorded.
			for i := range n {
				mismatches[i] = BitsMismatch{}
			}
		}
	}

	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stw := stopTheWorld(stwForTestPageCachePagesLeaked)

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld(stw)
	return
}

var ProcYield = procyield
var OSYield = osyield

type Mutex = mutex

var Lock = lock
var Unlock = unlock

var MutexContended = mutexContended

func SemRootLock(addr *uint32) *mutex {
	root := semtable.rootFor(addr)
	return &root.lock
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semtable.rootFor(addr)
	return root.nwait.Load()
}

const SemTableSize = semTabSize

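// SemTable is a wrapper around semTable exported for testing.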
type SemTable struct {
	semTable
}

// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
func (t *SemTable) Enqueue(addr *uint32) {
	s := acquireSudog()
	s.releasetime = 0
	s.acquiretime = 0
	s.ticket = 0
	t.semTable.rootFor(addr).queue(addr, s, false)
}

// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
//
// Returns true if there actually was a waiter to be dequeued.
func (t *SemTable) Dequeue(addr *uint32) bool {
	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
	if s != nil {
		releaseSudog(s)
		return true
	}
	return false
}

// MSpan is an exported mspan for testing.
type MSpan mspan

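// AllocMSpan allocates an mspan from the mheap's span allocator, under
// the heap lock, for tests that need a real mspan.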
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		s.init(0, 0)
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// FreeMSpan frees an mspan previously allocated with AllocMSpan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

// MSpanCountAlloc temporarily installs bits as the span's mark bits and
// returns countAlloc's count of set bits.
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uint16(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits = timeHistSubBucketBits
	TimeHistNumSubBuckets = timeHistNumSubBuckets
	TimeHistNumBuckets    = timeHistNumBuckets
	TimeHistMinBucketBits = timeHistMinBucketBits
	TimeHistMaxBucketBits = timeHistMaxBucketBits
)

type TimeHistogram timeHistogram

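// Count returns the counts for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the counts
// for the underflow bucket (bucket < 0) or the overflow bucket (index
// out of range) and false.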
func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
	t := (*timeHistogram)(th)
	if bucket < 0 {
		return t.underflow.Load(), false
	}
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= len(t.counts) {
		return t.overflow.Load(), false
	}
	return t.counts[i].Load(), true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	return fingStatus.Load()&fingWait != 0
}

// For GCTestMoveStackOnNextCall, it's important not to introduce an
// extra layer of call, since then there's a return before the "real"
// next call.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// GCTestIsReachable performs a GC and returns a bit set where bit i
// is set if ptrs[i] is reachable.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

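// GCTestPointerClass returns the category of what p points to, one of:
// "heap", "stack", "data", "bss", "other". This is useful for checking
// that a test is doing what it's intended to do.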
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled

const (
	GCBackgroundUtilization            = gcBackgroundUtilization
	GCGoalUtilization                  = gcGoalUtilization
	DefaultHeapMinimum                 = defaultHeapMinimum
	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
)

type GCController struct {
	gcControllerState
}

func NewGCController(gcPercent int, memoryLimit int64) *GCController {
	// Force the controller to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	g := Escape(new(GCController))
	g.gcControllerState.test = true
	g.init(int32(gcPercent), memoryLimit)
	return g
}

func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
	trigger, _ := c.trigger()
	if c.heapMarked > trigger {
		trigger = c.heapMarked
	}
	c.maxStackScan.Store(stackSize)
	c.globalsScan.Store(globalsSize)
	c.heapLive.Store(trigger)
	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
}

func (c *GCController) AssistWorkPerByte() float64 {
	return c.assistWorkPerByte.Load()
}

func (c *GCController) HeapGoal() uint64 {
	return c.heapGoal()
}

func (c *GCController) HeapLive() uint64 {
	return c.heapLive.Load()
}

func (c *GCController) HeapMarked() uint64 {
	return c.heapMarked
}

func (c *GCController) Triggered() uint64 {
	return c.triggered
}

type GCControllerReviseDelta struct {
	HeapLive        int64
	HeapScan        int64
	HeapScanWork    int64
	StackScanWork   int64
	GlobalsScanWork int64
}

func (c *GCController) Revise(d GCControllerReviseDelta) {
	c.heapLive.Add(d.HeapLive)
	c.heapScan.Add(d.HeapScan)
	c.heapScanWork.Add(d.HeapScanWork)
	c.stackScanWork.Add(d.StackScanWork)
	c.globalsScanWork.Add(d.GlobalsScanWork)
	c.revise()
}

func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
	c.assistTime.Store(assistTime)
	c.endCycle(elapsed, gomaxprocs, false)
	c.resetLive(bytesMarked)
	c.commit(false)
}

func (c *GCController) AddIdleMarkWorker() bool {
	return c.addIdleMarkWorker()
}

func (c *GCController) NeedIdleMarkWorker() bool {
	return c.needIdleMarkWorker()
}

func (c *GCController) RemoveIdleMarkWorker() {
	c.removeIdleMarkWorker()
}

func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
	c.setMaxIdleMarkWorkers(max)
}

var alwaysFalse bool
var escapeSink any

// Escape forces x to escape to the heap: because escapeSink is a global,
// the compiler must assume the assignment may happen, so x cannot be
// stack-allocated.
func Escape[T any](x T) T {
	if alwaysFalse {
		escapeSink = x
	}
	return x
}

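// Acquirem blocks preemption by pinning the current goroutine to its M;
// Releasem undoes it.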
func Acquirem() {
	acquirem()
}

func Releasem() {
	releasem(getg().m)
}

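// GoschedIfBusy exposes goschedIfBusy, which yields like Gosched but
// only when the scheduler is busy.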
func GoschedIfBusy() {
	goschedIfBusy()
}

type PIController struct {
	piController
}

func NewPIController(kp, ti, tt, min, max float64) *PIController {
	return &PIController{piController{
		kp:  kp,
		ti:  ti,
		tt:  tt,
		min: min,
		max: max,
	}}
}

func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
	return c.piController.next(input, setpoint, period)
}

const (
	CapacityPerProc          = capacityPerProc
	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
)

type GCCPULimiter struct {
	limiter gcCPULimiterState
}

func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
	// Force the limiter to escape. We're going to
	// do 64-bit atomics on it, and if it gets stack-allocated
	// on a 32-bit architecture, it may get allocated unaligned
	// space.
	l := Escape(new(GCCPULimiter))
	l.limiter.test = true
	l.limiter.resetCapacity(now, gomaxprocs)
	return l
}

func (l *GCCPULimiter) Fill() uint64 {
	return l.limiter.bucket.fill
}

func (l *GCCPULimiter) Capacity() uint64 {
	return l.limiter.bucket.capacity
}

func (l *GCCPULimiter) Overflow() uint64 {
	return l.limiter.overflow
}

func (l *GCCPULimiter) Limiting() bool {
	return l.limiter.limiting()
}

func (l *GCCPULimiter) NeedUpdate(now int64) bool {
	return l.limiter.needUpdate(now)
}

func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
	l.limiter.startGCTransition(enableGC, now)
}

func (l *GCCPULimiter) FinishGCTransition(now int64) {
	l.limiter.finishGCTransition(now)
}

func (l *GCCPULimiter) Update(now int64) {
	l.limiter.update(now)
}

func (l *GCCPULimiter) AddAssistTime(t int64) {
	l.limiter.addAssistTime(t)
}

func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
	l.limiter.resetCapacity(now, nprocs)
}

const ScavengePercent = scavengePercent

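// Scavenger exposes the background scavenger goroutine's state machine
// for testing, with its sleep, scavenge, stop, and GOMAXPROCS hooks
// stubbed out by the test.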
type Scavenger struct {
	Sleep      func(int64) int64
	Scavenge   func(uintptr) (uintptr, int64)
	ShouldStop func() bool
	GoMaxProcs func() int32

	released  atomic.Uintptr
	scavenger scavengerState
	stop      chan<- struct{}
	done      <-chan struct{}
}

func (s *Scavenger) Start() {
	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
		panic("must populate all stubs")
	}

	// Install hooks.
	s.scavenger.sleepStub = s.Sleep
	s.scavenger.scavenge = s.Scavenge
	s.scavenger.shouldStop = s.ShouldStop
	s.scavenger.gomaxprocs = s.GoMaxProcs

	// Start the scavenger.
	stop := make(chan struct{})
	s.stop = stop
	done := make(chan struct{})
	s.done = done
	go func() {
		// This should match bgscavenge, loosely.
		s.scavenger.init()
		s.scavenger.park()
		for {
			select {
			case <-stop:
				close(done)
				return
			default:
			}
			released, workTime := s.scavenger.run()
			if released == 0 {
				s.scavenger.park()
				continue
			}
			s.released.Add(released)
			s.scavenger.sleep(workTime)
		}
	}()
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("timed out waiting for scavenger to get ready")
	}
}

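// BlockUntilParked blocks until the scavenger parks, or until
// timeout is exceeded. Returns true if the scavenger parked.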
func (s *Scavenger) BlockUntilParked(timeout int64) bool {
	// Just spin, waiting for it to park.
	//
	// The actual parking process is racy with respect to
	// wakeups, which is fine, but for simplicity we need to make
	// sure we check the parked condition under the lock.
	start := nanotime()
	for nanotime()-start < timeout {
		lock(&s.scavenger.lock)
		parked := s.scavenger.parked
		unlock(&s.scavenger.lock)
		if parked {
			return true
		}
		Gosched()
	}
	return false
}

// Released returns how many bytes the scavenger released.
func (s *Scavenger) Released() uintptr {
	return s.released.Load()
}

// Wake wakes up a parked scavenger to keep running.
func (s *Scavenger) Wake() {
	s.scavenger.wake()
}

// Stop cleans up the scavenger's resources. The scavenger
// must be parked for this to work.
func (s *Scavenger) Stop() {
	lock(&s.scavenger.lock)
	parked := s.scavenger.parked
	unlock(&s.scavenger.lock)
	if !parked {
		panic("tried to clean up scavenger that is not parked")
	}
	close(s.stop)
	s.Wake()
	<-s.done
}

type ScavengeIndex struct {
	i scavengeIndex
}

func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
	s := new(ScavengeIndex)
	// Size the chunk backing store to hold every chunk up to max. Only
	// [min, max) is actually used, but allocating from zero keeps the
	// indexing simple; the extra entries are harmless in tests.
	s.i.chunks = make([]atomicScavChunkData, max)
	s.i.min.Store(uintptr(min))
	s.i.max.Store(uintptr(max))
	s.i.minHeapIdx.Store(uintptr(min))
	s.i.test = true
	return s
}

func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
	ci, off := s.i.find(force)
	return ChunkIdx(ci), off
}

func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.alloc(c, pallocChunkPages)
		}
		s.i.alloc(ec, ei+1)
	}
}

func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
	sc, ec := chunkIndex(base), chunkIndex(limit-1)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)

	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		s.i.free(sc, si, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		s.i.free(sc, si, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			s.i.free(c, 0, pallocChunkPages)
		}
		s.i.free(ec, 0, ei+1)
	}
}

func (s *ScavengeIndex) ResetSearchAddrs() {
	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
		addr, marked := a.Load()
		if marked {
			a.StoreUnmark(addr, addr)
		}
		a.Clear()
	}
	s.i.freeHWM = minOffAddr
}

func (s *ScavengeIndex) NextGen() {
	s.i.nextGen()
}

func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
	s.i.setEmpty(chunkIdx(ci))
}

// CheckPackScavChunkData packs and unpacks a scavChunkData and reports
// whether the round trip preserved it exactly.
func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
	sc0 := scavChunkData{
		gen:            gen,
		inUse:          inUse,
		lastInUse:      lastInUse,
		scavChunkFlags: scavChunkFlags(flags),
	}
	scp := sc0.pack()
	sc1 := unpackScavChunkData(scp)
	return sc0 == sc1
}

const GTrackingPeriod = gTrackingPeriod

var ZeroBase = unsafe.Pointer(&zerobase)

const UserArenaChunkBytes = userArenaChunkBytes

type UserArena struct {
	arena *userArena
}

func NewUserArena() *UserArena {
	return &UserArena{newUserArena()}
}

func (a *UserArena) New(out *any) {
	i := efaceOf(out)
	typ := i._type
	if typ.Kind() != abi.Pointer {
		panic("new result of non-ptr type")
	}
	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
	i.data = a.arena.new(typ)
}

func (a *UserArena) Slice(sl any, cap int) {
	a.arena.slice(sl, cap)
}

func (a *UserArena) Free() {
	a.arena.free()
}

func GlobalWaitingArenaChunks() int {
	n := 0
	systemstack(func() {
		lock(&mheap_.lock)
		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
			n++
		}
		unlock(&mheap_.lock)
	})
	return n
}

func UserArenaClone[T any](s T) T {
	return arena_heapify(s).(T)
}

var AlignUp = alignUp

func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
	return blockUntilEmptyFinalizerQueue(timeout)
}

func BlockUntilEmptyCleanupQueue(timeout int64) bool {
	return gcCleanups.blockUntilEmpty(timeout)
}

func FrameStartLine(f *Frame) int {
	return f.startLine
}

// PersistentAlloc allocates some memory that lives outside the Go heap.
// This memory will never be freed; use it carefully.
func PersistentAlloc(n, align uintptr) unsafe.Pointer {
	return persistentalloc(n, align, &memstats.other_sys)
}

const TagAlign = tagAlign

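// FPCallers works like Callers and uses frame pointer unwinding to populate
// pcBuf with the return addresses of the physical frames on the stack.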
func FPCallers(pcBuf []uintptr) int {
	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
}

const FramePointerEnabled = framepointer_enabled

var (
	IsPinned      = isPinned
	GetPinCounter = pinnerGetPinCounter
)

func SetPinnerLeakPanic(f func()) {
	pinnerLeakPanic = f
}
func GetPinnerLeakPanic() func() {
	return pinnerLeakPanic
}

var testUintptr uintptr

func MyGenericFunc[T any]() {
	systemstack(func() {
		testUintptr = 4
	})
}

func UnsafePoint(pc uintptr) bool {
	fi := findfunc(pc)
	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
	switch v {
	case abi.UnsafePointUnsafe:
		return true
	case abi.UnsafePointSafe:
		return false
	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
		// These are all interruptible, they just encode a nonstandard
		// way of recovering when interrupted.
		return false
	default:
		var buf [20]byte
		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
	}
}

type TraceMap struct {
	traceMap
}

func (m *TraceMap) PutString(s string) (uint64, bool) {
	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}

func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}

func GCMarkDoneRestarted() bool {
	// Only read this outside of the GC. If we're running during a GC, just report false.
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}

type BitCursor struct {
	b bitCursor
}

func NewBitCursor(buf *byte) BitCursor {
	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
}

func (b BitCursor) Write(data *byte, cnt uintptr) {
	b.b.write(data, cnt)
}
func (b BitCursor) Offset(cnt uintptr) BitCursor {
	return BitCursor{b: b.b.offset(cnt)}
}

const (
	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
)

type TraceStackTable traceStackTable

func (t *TraceStackTable) Reset() {
	t.tab.reset()
}

func TraceStack(gp *G, tab *TraceStackTable) {
	traceStack(0, gp, (*traceStackTable)(tab))
}

var DebugDecorateMappings = &debug.decoratemappings

func SetVMANameSupported() bool { return setVMANameSupported() }

type ListHead struct {
	l listHead
}

func (head *ListHead) Init(off uintptr) {
	head.l.init(off)
}

type ListNode struct {
	l listNode
}

func (head *ListHead) Push(p unsafe.Pointer) {
	head.l.push(p)
}

func (head *ListHead) Pop() unsafe.Pointer {
	return head.l.pop()
}

func (head *ListHead) Remove(p unsafe.Pointer) {
	head.l.remove(p)
}

type ListHeadManual struct {
	l listHeadManual
}

func (head *ListHeadManual) Init(off uintptr) {
	head.l.init(off)
}

type ListNodeManual struct {
	l listNodeManual
}

func (head *ListHeadManual) Push(p unsafe.Pointer) {
	head.l.push(p)
}

func (head *ListHeadManual) Pop() unsafe.Pointer {
	return head.l.pop()
}

func (head *ListHeadManual) Remove(p unsafe.Pointer) {
	head.l.remove(p)
}