Source file: src/runtime/export_test.go
1
2
3
4
5
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "internal/runtime/gc"
15 "internal/runtime/sys"
16 "unsafe"
17 )
18
19 var Fadd64 = fadd64
20 var Fsub64 = fsub64
21 var Fmul64 = fmul64
22 var Fdiv64 = fdiv64
23 var F64to32 = f64to32
24 var F32to64 = f32to64
25 var Fcmp64 = fcmp64
26 var Fintto64 = fintto64
27 var F64toint = f64toint
28
29 var Entersyscall = entersyscall
30 var Exitsyscall = exitsyscall
31 var LockedOSThread = lockedOSThread
32 var Xadduintptr = atomic.Xadduintptr
33
34 var ReadRandomFailed = &readRandomFailed
35
36 var Fastlog2 = fastlog2
37
38 var ParseByteCount = parseByteCount
39
40 var Nanotime = nanotime
41 var Cputicks = cputicks
42 var CyclesPerSecond = pprof_cyclesPerSecond
43 var NetpollBreak = netpollBreak
44 var Usleep = usleep
45
46 var PhysPageSize = physPageSize
47 var PhysHugePageSize = physHugePageSize
48
49 var NetpollGenericInit = netpollGenericInit
50
51 var Memmove = memmove
52 var MemclrNoHeapPointers = memclrNoHeapPointers
53
54 var CgoCheckPointer = cgoCheckPointer
55
56 const CrashStackImplemented = crashStackImplemented
57
58 const TracebackInnerFrames = tracebackInnerFrames
59 const TracebackOuterFrames = tracebackOuterFrames
60
61 var LockPartialOrder = lockPartialOrder
62
63 type TimeTimer = timeTimer
64
65 type LockRank lockRank
66
67 func (l LockRank) String() string {
68 return lockRank(l).String()
69 }
70
71 const PreemptMSupported = preemptMSupported
72
73 type LFNode struct {
74 Next uint64
75 Pushcnt uintptr
76 }
77
78 func LFStackPush(head *uint64, node *LFNode) {
79 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
80 }
81
82 func LFStackPop(head *uint64) *LFNode {
83 return (*LFNode)((*lfstack)(head).pop())
84 }
85 func LFNodeValidate(node *LFNode) {
86 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
87 }
88
89 func Netpoll(delta int64) {
90 systemstack(func() {
91 netpoll(delta)
92 })
93 }
94
95 func PointerMask(x any) (ret []byte) {
96 systemstack(func() {
97 ret = pointerMask(x)
98 })
99 return
100 }
101
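// RunSchedLocalQueueTest exercises runqput and runqget on a single P's
// local run queue, checking that the queue drains to exactly the
// goroutines that were put on it.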
102 func RunSchedLocalQueueTest() {
103 pp := new(p)
104 gs := make([]g, len(pp.runq))
105 Escape(gs)
106 for i := 0; i < len(pp.runq); i++ {
107 if g, _ := runqget(pp); g != nil {
108 throw("runq is not empty initially")
109 }
110 for j := 0; j < i; j++ {
111 runqput(pp, &gs[i], false)
112 }
113 for j := 0; j < i; j++ {
114 if g, _ := runqget(pp); g != &gs[i] {
115 print("bad element at iter ", i, "/", j, "\n")
116 throw("bad element")
117 }
118 }
119 if g, _ := runqget(pp); g != nil {
120 throw("runq is not empty afterwards")
121 }
122 }
123 }
124
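// RunSchedLocalQueueStealTest exercises runqsteal between two Ps,
// checking that every queued goroutine is returned exactly once and
// that roughly half of them are stolen each iteration.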
125 func RunSchedLocalQueueStealTest() {
126 p1 := new(p)
127 p2 := new(p)
128 gs := make([]g, len(p1.runq))
129 Escape(gs)
130 for i := 0; i < len(p1.runq); i++ {
131 for j := 0; j < i; j++ {
132 gs[j].sig = 0
133 runqput(p1, &gs[j], false)
134 }
135 gp := runqsteal(p2, p1, true)
136 s := 0
137 if gp != nil {
138 s++
139 gp.sig++
140 }
141 for {
142 gp, _ = runqget(p2)
143 if gp == nil {
144 break
145 }
146 s++
147 gp.sig++
148 }
149 for {
150 gp, _ = runqget(p1)
151 if gp == nil {
152 break
153 }
154 gp.sig++
155 }
156 for j := 0; j < i; j++ {
157 if gs[j].sig != 1 {
158 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
159 throw("bad element")
160 }
161 }
162 if s != i/2 && s != i/2+1 {
163 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
164 throw("bad steal")
165 }
166 }
167 }
168
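// RunSchedLocalQueueEmptyTest checks that runqempty never spuriously
// reports an empty queue while another goroutine concurrently puts and
// gets elements on the same P.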
169 func RunSchedLocalQueueEmptyTest(iters int) {
170
171
172
173
174 done := make(chan bool, 1)
175 p := new(p)
176 gs := make([]g, 2)
177 Escape(gs)
178 ready := new(uint32)
179 for i := 0; i < iters; i++ {
180 *ready = 0
181 next0 := (i & 1) == 0
182 next1 := (i & 2) == 0
183 runqput(p, &gs[0], next0)
184 go func() {
185 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
186 }
187 if runqempty(p) {
188 println("next:", next0, next1)
189 throw("queue is empty")
190 }
191 done <- true
192 }()
193 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
194 }
195 runqput(p, &gs[1], next1)
196 runqget(p)
197 <-done
198 runqget(p)
199 }
200 }
201
202 var (
203 StringHash = stringHash
204 BytesHash = bytesHash
205 Int32Hash = int32Hash
206 Int64Hash = int64Hash
207 MemHash = memhash
208 MemHash32 = memhash32
209 MemHash64 = memhash64
210 EfaceHash = efaceHash
211 IfaceHash = ifaceHash
212 )
213
214 var UseAeshash = &useAeshash
215
216 func MemclrBytes(b []byte) {
217 s := (*slice)(unsafe.Pointer(&b))
218 memclrNoHeapPointers(s.array, uintptr(s.len))
219 }
220
221 const HashLoad = hashLoad
222
223
224 func GostringW(w []uint16) (s string) {
225 systemstack(func() {
226 s = gostringw(&w[0])
227 })
228 return
229 }
230
231 var Open = open
232 var Close = closefd
233 var Read = read
234 var Write = write
235
236 func Envs() []string { return envs }
237 func SetEnvs(e []string) { envs = e }
238
239 const PtrSize = goarch.PtrSize
240
241 const ClobberdeadPtr = clobberdeadPtr
242
243 func Clobberfree() bool {
244 return debug.clobberfree != 0
245 }
246
247 var ForceGCPeriod = &forcegcperiod
248
249
250
251
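// SetTracebackEnv calls setTraceback and additionally records the result
// as the environment-derived setting (traceback_env), as if the level had
// come from GOTRACEBACK.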
252 func SetTracebackEnv(level string) {
253 setTraceback(level)
254 traceback_env = traceback_cache
255 }
256
257 var ReadUnaligned32 = readUnaligned32
258 var ReadUnaligned64 = readUnaligned64
259
260 func CountPagesInUse() (pagesInUse, counted uintptr) {
261 stw := stopTheWorld(stwForTestCountPagesInUse)
262
263 pagesInUse = mheap_.pagesInUse.Load()
264
265 for _, s := range mheap_.allspans {
266 if s.state.get() == mSpanInUse {
267 counted += s.npages
268 }
269 }
270
271 startTheWorld(stw)
272
273 return
274 }
275
276 func Blocksampled(cycles, rate int64) bool { return blocksampled(cycles, rate) }
277
278 func Cheaprand() uint32 { return cheaprand() }
279 func Cheaprand64() int64 { return cheaprand64() }
280 func Fastrand() uint32 { return uint32(rand()) }
281 func Fastrand64() uint64 { return rand() }
282 func Fastrandn(n uint32) uint32 { return randn(n) }
283
284 type ProfBuf profBuf
285
286 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
287 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
288 }
289
290 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
291 (*profBuf)(p).write(tag, now, hdr, stk)
292 }
293
294 const (
295 ProfBufBlocking = profBufBlocking
296 ProfBufNonBlocking = profBufNonBlocking
297 )
298
299 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
300 return (*profBuf)(p).read(mode)
301 }
302
303 func (p *ProfBuf) Close() {
304 (*profBuf)(p).close()
305 }
306
307 type CPUStats = cpuStats
308
309 func ReadCPUStats() CPUStats {
310 return work.cpuStats
311 }
312
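// ReadMetricsSlow reads runtime/metrics samples and MemStats with the
// world stopped and the metrics lock held, so that the two views are
// mutually consistent. For testing only.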
313 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
314 stw := stopTheWorld(stwForTestReadMetricsSlow)
315
316
317
318 metricsLock()
319 initMetrics()
320
321 systemstack(func() {
322
323
324 getg().racectx = getg().m.curg.racectx
325
326
327
328
329
330
331 readMetricsLocked(samplesp, len, cap)
332
333
334
335
336
337 readmemstats_m(memStats)
338
339
340
341
342 readMetricsLocked(samplesp, len, cap)
343
344
345 getg().racectx = 0
346 })
347 metricsUnlock()
348
349 startTheWorld(stw)
350 }
351
352 var DoubleCheckReadMemStats = &doubleCheckReadMemStats
353
354
355
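// ReadMemStatsSlow returns both the runtime-computed MemStats and a
// MemStats recomputed from scratch by walking all spans and the heap
// statistics, so tests can cross-check the two. The world is stopped
// while it runs.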
356 func ReadMemStatsSlow() (base, slow MemStats) {
357 stw := stopTheWorld(stwForTestReadMemStatsSlow)
358
359
360 systemstack(func() {
361
362 getg().m.mallocing++
363
364 readmemstats_m(&base)
365
366
367
368 slow = base
369 slow.Alloc = 0
370 slow.TotalAlloc = 0
371 slow.Mallocs = 0
372 slow.Frees = 0
373 slow.HeapReleased = 0
374 var bySize [gc.NumSizeClasses]struct {
375 Mallocs, Frees uint64
376 }
377
378
379 for _, s := range mheap_.allspans {
380 if s.state.get() != mSpanInUse {
381 continue
382 }
383 if s.isUnusedUserArenaChunk() {
384 continue
385 }
386 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
387 slow.Mallocs++
388 slow.Alloc += uint64(s.elemsize)
389 } else {
390 slow.Mallocs += uint64(s.allocCount)
391 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
392 bySize[sizeclass].Mallocs += uint64(s.allocCount)
393 }
394 }
395
396
397 var m heapStatsDelta
398 memstats.heapStats.unsafeRead(&m)
399
400
401 var smallFree uint64
402 for i := 0; i < gc.NumSizeClasses; i++ {
403 slow.Frees += m.smallFreeCount[i]
404 bySize[i].Frees += m.smallFreeCount[i]
405 bySize[i].Mallocs += m.smallFreeCount[i]
406 smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
407 }
408 slow.Frees += m.tinyAllocCount + m.largeFreeCount
409 slow.Mallocs += slow.Frees
410
411 slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
412
413 for i := range slow.BySize {
414 slow.BySize[i].Mallocs = bySize[i].Mallocs
415 slow.BySize[i].Frees = bySize[i].Frees
416 }
417
418 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
419 chunk := mheap_.pages.tryChunkOf(i)
420 if chunk == nil {
421 continue
422 }
423 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
424 slow.HeapReleased += uint64(pg) * pageSize
425 }
426 for _, p := range allp {
427
428 pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
429 slow.HeapReleased += uint64(pg) * pageSize
430 }
431
432 getg().m.mallocing--
433 })
434
435 startTheWorld(stw)
436 return
437 }
438
439
440
441
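// ShrinkStackAndVerifyFramePointers forces a copying shrink of the
// current goroutine's stack and then walks the frame pointer chain to
// verify it is still intact afterwards.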
442 func ShrinkStackAndVerifyFramePointers() {
443 before := stackPoisonCopy
444 defer func() { stackPoisonCopy = before }()
445 stackPoisonCopy = 1
446
447 gp := getg()
448 systemstack(func() {
449 shrinkstack(gp)
450 })
451
452
453 FPCallers(make([]uintptr, 1024))
454 }
455
456
457
458
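// BlockOnSystemStack prints "x" and then deadlocks on the system stack
// by acquiring the same lock twice. It never returns.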
459 func BlockOnSystemStack() {
460 systemstack(blockOnSystemStackInternal)
461 }
462
463 func blockOnSystemStackInternal() {
464 print("x\n")
465 lock(&deadlock)
466 lock(&deadlock)
467 }
468
469 type RWMutex struct {
470 rw rwmutex
471 }
472
473 func (rw *RWMutex) Init() {
474 rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
475 }
476
477 func (rw *RWMutex) RLock() {
478 rw.rw.rlock()
479 }
480
481 func (rw *RWMutex) RUnlock() {
482 rw.rw.runlock()
483 }
484
485 func (rw *RWMutex) Lock() {
486 rw.rw.lock()
487 }
488
489 func (rw *RWMutex) Unlock() {
490 rw.rw.unlock()
491 }
492
493 func LockOSCounts() (external, internal uint32) {
494 gp := getg()
495 if gp.m.lockedExt+gp.m.lockedInt == 0 {
496 if gp.lockedm != 0 {
497 panic("lockedm on non-locked goroutine")
498 }
499 } else {
500 if gp.lockedm == 0 {
501 panic("nil lockedm on locked goroutine")
502 }
503 }
504 return gp.m.lockedExt, gp.m.lockedInt
505 }
506
507
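// TracebackSystemstack nests i systemstack calls and then records the
// traceback PCs into stk, returning the number of PCs written.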
508 func TracebackSystemstack(stk []uintptr, i int) int {
509 if i == 0 {
510 pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
511 var u unwinder
512 u.initAt(pc, sp, 0, getg(), unwindJumpStack)
513 return tracebackPCs(&u, 0, stk)
514 }
515 n := 0
516 systemstack(func() {
517 n = TracebackSystemstack(stk, i-1)
518 })
519 return n
520 }
521
522 func KeepNArenaHints(n int) {
523 hint := mheap_.arenaHints
524 for i := 1; i < n; i++ {
525 hint = hint.next
526 if hint == nil {
527 return
528 }
529 }
530 hint.next = nil
531 }
532
533
534
535
536
537
538
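// MapNextArenaHint attempts to reserve a physical page at the next heap
// arena growth hint. It returns the arena-sized address range adjacent to
// the hint and whether the reservation landed at the requested address.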
539 func MapNextArenaHint() (start, end uintptr, ok bool) {
540 hint := mheap_.arenaHints
541 addr := hint.addr
542 if hint.down {
543 start, end = addr-heapArenaBytes, addr
544 addr -= physPageSize
545 } else {
546 start, end = addr, addr+heapArenaBytes
547 }
548 got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
549 ok = (addr == uintptr(got))
550 if !ok {
551
552
553 sysFreeOS(got, physPageSize)
554 }
555 return
556 }
557
558 func NextArenaHint() (uintptr, bool) {
559 if mheap_.arenaHints == nil {
560 return 0, false
561 }
562 return mheap_.arenaHints.addr, true
563 }
564
565 type G = g
566
567 type Sudog = sudog
568
569 type XRegPerG = xRegPerG
570
571 func Getg() *G {
572 return getg()
573 }
574
575 func Goid() uint64 {
576 return getg().goid
577 }
578
579 func GIsWaitingOnMutex(gp *G) bool {
580 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
581 }
582
583 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
584
585
586 func PanicForTesting(b []byte, i int) byte {
587 return unexportedPanicForTesting(b, i)
588 }
589
590
591 func unexportedPanicForTesting(b []byte, i int) byte {
592 return b[i]
593 }
594
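// G0StackOverflow shrinks g0's recorded stack bounds on the system stack
// and then overflows them via the unbounded recursion in stackOverflow,
// resulting in a fatal stack overflow.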
595 func G0StackOverflow() {
596 systemstack(func() {
597 g0 := getg()
598 sp := sys.GetCallerSP()
599
600
601
602 g0.stack.lo = sp - 4096 - stackSystem
603 g0.stackguard0 = g0.stack.lo + stackGuard
604 g0.stackguard1 = g0.stackguard0
605
606 stackOverflow(nil)
607 })
608 }
609
610 func stackOverflow(x *byte) {
611 var buf [256]byte
612 stackOverflow(&buf[0])
613 }
614
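// RunGetgThreadSwitchTest checks that a getg() result obtained before a
// channel receive still refers to the current goroutine after the
// receive, which may have moved the goroutine to a different thread.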
615 func RunGetgThreadSwitchTest() {
616
617
618
619
620
621
622 ch := make(chan int)
623 go func(ch chan int) {
624 ch <- 5
625 LockOSThread()
626 }(ch)
627
628 g1 := getg()
629
630
631
632
633
634 <-ch
635
636 g2 := getg()
637 if g1 != g2 {
638 panic("g1 != g2")
639 }
640
641
642
643 g3 := getg()
644 if g1 != g3 {
645 panic("g1 != g3")
646 }
647 }
648
649
650 func Freegc(p unsafe.Pointer, size uintptr, noscan bool) {
651 freegc(p, size, noscan)
652 }
653
654
655 func AssistCredit() int64 {
656 assistG := getg()
657 if assistG.m.curg != nil {
658 assistG = assistG.m.curg
659 }
660 return assistG.gcAssistBytes
661 }
662
663
664 func GcBlackenEnable() bool {
665
666
667
668
669
670 return gcBlackenEnabled != 0
671 }
672
673 const SizeSpecializedMallocEnabled = sizeSpecializedMallocEnabled
674
675 const RuntimeFreegcEnabled = runtimeFreegcEnabled
676
677 const (
678 PageSize = pageSize
679 PallocChunkPages = pallocChunkPages
680 PageAlloc64Bit = pageAlloc64Bit
681 PallocSumBytes = pallocSumBytes
682 )
683
684
685 type PallocSum pallocSum
686
687 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
688 func (m PallocSum) Start() uint { return pallocSum(m).start() }
689 func (m PallocSum) Max() uint { return pallocSum(m).max() }
690 func (m PallocSum) End() uint { return pallocSum(m).end() }
691
692
693 type PallocBits pallocBits
694
695 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
696 return (*pallocBits)(b).find(npages, searchIdx)
697 }
698 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
699 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
700 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
701 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
702
703
704
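// SummarizeSlow is a slow but straightforward reference implementation
// of (*PallocBits).Summarize, used to cross-check the optimized version.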
705 func SummarizeSlow(b *PallocBits) PallocSum {
706 var start, most, end uint
707
708 const N = uint(len(b)) * 64
709 for start < N && (*pageBits)(b).get(start) == 0 {
710 start++
711 }
712 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
713 end++
714 }
715 run := uint(0)
716 for i := uint(0); i < N; i++ {
717 if (*pageBits)(b).get(i) == 0 {
718 run++
719 } else {
720 run = 0
721 }
722 most = max(most, run)
723 }
724 return PackPallocSum(start, most, end)
725 }
726
727
728 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
729
730
731
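// DiffPallocBits returns the bit ranges (as page indices) on which the
// two PallocBits differ.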
732 func DiffPallocBits(a, b *PallocBits) []BitRange {
733 ba := (*pageBits)(a)
734 bb := (*pageBits)(b)
735
736 var d []BitRange
737 base, size := uint(0), uint(0)
738 for i := uint(0); i < uint(len(ba))*64; i++ {
739 if ba.get(i) != bb.get(i) {
740 if size == 0 {
741 base = i
742 }
743 size++
744 } else {
745 if size != 0 {
746 d = append(d, BitRange{base, size})
747 }
748 size = 0
749 }
750 }
751 if size != 0 {
752 d = append(d, BitRange{base, size})
753 }
754 return d
755 }
756
757
758
759
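// StringifyPallocBits renders the bits of b in the range r as a string
// of '0' and '1' characters.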
760 func StringifyPallocBits(b *PallocBits, r BitRange) string {
761 str := ""
762 for j := r.I; j < r.I+r.N; j++ {
763 if (*pageBits)(b).get(j) != 0 {
764 str += "1"
765 } else {
766 str += "0"
767 }
768 }
769 return str
770 }
771
772
773 type PallocData pallocData
774
775 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
776 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
777 }
778 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
779 func (d *PallocData) ScavengedSetRange(i, n uint) {
780 (*pallocData)(d).scavenged.setRange(i, n)
781 }
782 func (d *PallocData) PallocBits() *PallocBits {
783 return (*PallocBits)(&(*pallocData)(d).pallocBits)
784 }
785 func (d *PallocData) Scavenged() *PallocBits {
786 return (*PallocBits)(&(*pallocData)(d).scavenged)
787 }
788
789
790 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
791
792
793 type PageCache pageCache
794
795 const PageCachePages = pageCachePages
796
797 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
798 return PageCache(pageCache{base: base, cache: cache, scav: scav})
799 }
800 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
801 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
802 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
803 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
804 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
805 return (*pageCache)(c).alloc(npages)
806 }
807 func (c *PageCache) Flush(s *PageAlloc) {
808 cp := (*pageCache)(c)
809 sp := (*pageAlloc)(s)
810
811 systemstack(func() {
812
813
814 lock(sp.mheapLock)
815 cp.flush(sp)
816 unlock(sp.mheapLock)
817 })
818 }
819
820
821 type ChunkIdx chunkIdx
822
823
824
825 type PageAlloc pageAlloc
826
827 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
828 pp := (*pageAlloc)(p)
829
830 var addr, scav uintptr
831 systemstack(func() {
832
833
834 lock(pp.mheapLock)
835 addr, scav = pp.alloc(npages)
836 unlock(pp.mheapLock)
837 })
838 return addr, scav
839 }
840 func (p *PageAlloc) AllocToCache() PageCache {
841 pp := (*pageAlloc)(p)
842
843 var c PageCache
844 systemstack(func() {
845
846
847 lock(pp.mheapLock)
848 c = PageCache(pp.allocToCache())
849 unlock(pp.mheapLock)
850 })
851 return c
852 }
853 func (p *PageAlloc) Free(base, npages uintptr) {
854 pp := (*pageAlloc)(p)
855
856 systemstack(func() {
857
858
859 lock(pp.mheapLock)
860 pp.free(base, npages)
861 unlock(pp.mheapLock)
862 })
863 }
864 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
865 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
866 }
867 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
868 pp := (*pageAlloc)(p)
869 systemstack(func() {
870 r = pp.scavenge(nbytes, nil, true)
871 })
872 return
873 }
874 func (p *PageAlloc) InUse() []AddrRange {
875 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
876 for _, r := range p.inUse.ranges {
877 ranges = append(ranges, AddrRange{r})
878 }
879 return ranges
880 }
881
882
883 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
884 ci := chunkIdx(i)
885 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
886 }
887
888
889 type AddrRange struct {
890 addrRange
891 }
892
893
894 func MakeAddrRange(base, limit uintptr) AddrRange {
895 return AddrRange{makeAddrRange(base, limit)}
896 }
897
898
899 func (a AddrRange) Base() uintptr {
900 return a.addrRange.base.addr()
901 }
902
903
904 func (a AddrRange) Limit() uintptr {
905 return a.addrRange.limit.addr()
906 }
907
908
909 func (a AddrRange) Equals(b AddrRange) bool {
910 return a == b
911 }
912
913
914 func (a AddrRange) Size() uintptr {
915 return a.addrRange.size()
916 }
917
918
919
920
921
922 var testSysStat = &memstats.other_sys
923
924
925 type AddrRanges struct {
926 addrRanges
927 mutable bool
928 }
929
930
931
932
933
934
935
936
937
938
939 func NewAddrRanges() AddrRanges {
940 r := addrRanges{}
941 r.init(testSysStat)
942 return AddrRanges{r, true}
943 }
944
945
946
947
948
949
950 func MakeAddrRanges(a ...AddrRange) AddrRanges {
951
952
953
954
955
956 ranges := make([]addrRange, 0, len(a))
957 total := uintptr(0)
958 for _, r := range a {
959 ranges = append(ranges, r.addrRange)
960 total += r.Size()
961 }
962 return AddrRanges{addrRanges{
963 ranges: ranges,
964 totalBytes: total,
965 sysStat: testSysStat,
966 }, false}
967 }
968
969
970
971 func (a *AddrRanges) Ranges() []AddrRange {
972 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
973 for _, r := range a.addrRanges.ranges {
974 result = append(result, AddrRange{r})
975 }
976 return result
977 }
978
979
980
981 func (a *AddrRanges) FindSucc(base uintptr) int {
982 return a.findSucc(base)
983 }
984
985
986
987
988
989 func (a *AddrRanges) Add(r AddrRange) {
990 if !a.mutable {
991 throw("attempt to mutate immutable AddrRanges")
992 }
993 a.add(r.addrRange)
994 }
995
996
997 func (a *AddrRanges) TotalBytes() uintptr {
998 return a.addrRanges.totalBytes
999 }
1000
1001
1002 type BitRange struct {
1003 I, N uint
1004 }
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
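// NewPageAlloc creates a page allocator for testing and seeds it from
// the chunks map, which lists allocated page ranges per chunk, and the
// optional scav map, which lists ranges to mark as already scavenged.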
1020 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1021 p := new(pageAlloc)
1022
1023
1024 p.init(new(mutex), testSysStat, true)
1025 lockInit(p.mheapLock, lockRankMheap)
1026 for i, init := range chunks {
1027 addr := chunkBase(chunkIdx(i))
1028
1029
1030 systemstack(func() {
1031 lock(p.mheapLock)
1032 p.grow(addr, pallocChunkBytes)
1033 unlock(p.mheapLock)
1034 })
1035
1036
1037 ci := chunkIndex(addr)
1038 chunk := p.chunkOf(ci)
1039
1040
1041 chunk.scavenged.clearRange(0, pallocChunkPages)
1042
1043
1044
1045
1046 p.scav.index.alloc(ci, pallocChunkPages)
1047 p.scav.index.free(ci, 0, pallocChunkPages)
1048
1049
1050 if scav != nil {
1051 if scvg, ok := scav[i]; ok {
1052 for _, s := range scvg {
1053
1054
1055 if s.N != 0 {
1056 chunk.scavenged.setRange(s.I, s.N)
1057 }
1058 }
1059 }
1060 }
1061
1062
1063 for _, s := range init {
1064
1065
1066 if s.N != 0 {
1067 chunk.allocRange(s.I, s.N)
1068
1069
1070 p.scav.index.alloc(ci, s.N)
1071 }
1072 }
1073
1074
1075 systemstack(func() {
1076 lock(p.mheapLock)
1077 p.update(addr, pallocChunkPages, false, false)
1078 unlock(p.mheapLock)
1079 })
1080 }
1081
1082 return (*PageAlloc)(p)
1083 }
1084
1085
1086
1087
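// FreePageAlloc frees the OS memory backing a PageAlloc created by
// NewPageAlloc: the summary levels, the scavenge index, and every
// allocated chunk bitmap.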
1088 func FreePageAlloc(pp *PageAlloc) {
1089 p := (*pageAlloc)(pp)
1090
1091
1092 if pageAlloc64Bit != 0 {
1093 for l := 0; l < summaryLevels; l++ {
1094 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1095 }
1096 } else {
1097 resSize := uintptr(0)
1098 for _, s := range p.summary {
1099 resSize += uintptr(cap(s)) * pallocSumBytes
1100 }
1101 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1102 }
1103
1104
1105 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1106
1107
1108
1109
1110
1111 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1112 testSysStat.add(-int64(p.summaryMappedReady))
1113
1114
1115 for i := range p.chunks {
1116 if x := p.chunks[i]; x != nil {
1117 p.chunks[i] = nil
1118
1119 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1120 }
1121 }
1122 }
1123
1124
1125
1126
1127
1128
1129
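// BaseChunkIdx is a platform-dependent base chunk index used by page
// allocator tests; on AIX the underlying address is offset by
// arenaBaseOffset.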
1130 var BaseChunkIdx = func() ChunkIdx {
1131 var prefix uintptr
1132 if pageAlloc64Bit != 0 {
1133 prefix = 0xc000
1134 } else {
1135 prefix = 0x100
1136 }
1137 baseAddr := prefix * pallocChunkBytes
1138 if goos.IsAix != 0 {
1139 baseAddr += arenaBaseOffset
1140 }
1141 return ChunkIdx(chunkIndex(baseAddr))
1142 }()
1143
1144
1145
1146 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1147 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1148 }
1149
1150 type BitsMismatch struct {
1151 Base uintptr
1152 Got, Want uint64
1153 }
1154
1155 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1156 ok = true
1157
1158
1159 systemstack(func() {
1160 getg().m.mallocing++
1161
1162
1163 lock(&mheap_.lock)
1164
1165 chunkLoop:
1166 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1167 chunk := mheap_.pages.tryChunkOf(i)
1168 if chunk == nil {
1169 continue
1170 }
1171 cb := chunkBase(i)
1172 for j := 0; j < pallocChunkPages/64; j++ {
1173
1174
1175
1176
1177
1178 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1179 got := chunk.scavenged[j]
1180 if want != got {
1181 ok = false
1182 if n >= len(mismatches) {
1183 break chunkLoop
1184 }
1185 mismatches[n] = BitsMismatch{
1186 Base: cb + uintptr(j)*64*pageSize,
1187 Got: got,
1188 Want: want,
1189 }
1190 n++
1191 }
1192 }
1193 }
1194 unlock(&mheap_.lock)
1195
1196 getg().m.mallocing--
1197 })
1198
1199 if randomizeHeapBase && len(mismatches) > 0 {
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214 affectedArenas := map[arenaIdx]bool{}
1215 for _, mismatch := range mismatches {
1216 if mismatch.Base > 0 {
1217 affectedArenas[arenaIndex(mismatch.Base)] = true
1218 }
1219 }
1220 if len(affectedArenas) == 1 {
1221 ok = true
1222
1223 for i := range n {
1224 mismatches[i] = BitsMismatch{}
1225 }
1226 }
1227 }
1228
1229 return
1230 }
1231
1232 func PageCachePagesLeaked() (leaked uintptr) {
1233 stw := stopTheWorld(stwForTestPageCachePagesLeaked)
1234
1235
1236 deadp := allp[len(allp):cap(allp)]
1237 for _, p := range deadp {
1238
1239
1240 if p != nil {
1241 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1242 }
1243 }
1244
1245 startTheWorld(stw)
1246 return
1247 }
1248
1249 var ProcYield = procyield
1250 var OSYield = osyield
1251
1252 type Mutex = mutex
1253
1254 var Lock = lock
1255 var Unlock = unlock
1256
1257 var MutexContended = mutexContended
1258
1259 func SemRootLock(addr *uint32) *mutex {
1260 root := semtable.rootFor(addr)
1261 return &root.lock
1262 }
1263
1264 var Semacquire = semacquire
1265 var Semrelease1 = semrelease1
1266
1267 func SemNwait(addr *uint32) uint32 {
1268 root := semtable.rootFor(addr)
1269 return root.nwait.Load()
1270 }
1271
1272 const SemTableSize = semTabSize
1273
1274
1275 type SemTable struct {
1276 semTable
1277 }
1278
1279
1280 func (t *SemTable) Enqueue(addr *uint32) {
1281 s := acquireSudog()
1282 s.releasetime = 0
1283 s.acquiretime = 0
1284 s.ticket = 0
1285 t.semTable.rootFor(addr).queue(addr, s, false)
1286 }
1287
1288
1289
1290
1291 func (t *SemTable) Dequeue(addr *uint32) bool {
1292 s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
1293 if s != nil {
1294 releaseSudog(s)
1295 return true
1296 }
1297 return false
1298 }
1299
1300
1301 type MSpan mspan
1302
1303
1304 func AllocMSpan() *MSpan {
1305 var s *mspan
1306 systemstack(func() {
1307 lock(&mheap_.lock)
1308 s = (*mspan)(mheap_.spanalloc.alloc())
1309 s.init(0, 0)
1310 unlock(&mheap_.lock)
1311 })
1312 return (*MSpan)(s)
1313 }
1314
1315
1316 func FreeMSpan(s *MSpan) {
1317 systemstack(func() {
1318 lock(&mheap_.lock)
1319 mheap_.spanalloc.free(unsafe.Pointer(s))
1320 unlock(&mheap_.lock)
1321 })
1322 }
1323
1324 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1325 s := (*mspan)(ms)
1326 s.nelems = uint16(len(bits) * 8)
1327 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1328 result := s.countAlloc()
1329 s.gcmarkBits = nil
1330 return result
1331 }
1332
1333 const (
1334 TimeHistSubBucketBits = timeHistSubBucketBits
1335 TimeHistNumSubBuckets = timeHistNumSubBuckets
1336 TimeHistNumBuckets = timeHistNumBuckets
1337 TimeHistMinBucketBits = timeHistMinBucketBits
1338 TimeHistMaxBucketBits = timeHistMaxBucketBits
1339 )
1340
1341 type TimeHistogram timeHistogram
1342
1343
1344
1345
1346
1347 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1348 t := (*timeHistogram)(th)
1349 if bucket < 0 {
1350 return t.underflow.Load(), false
1351 }
1352 i := bucket*TimeHistNumSubBuckets + subBucket
1353 if i >= len(t.counts) {
1354 return t.overflow.Load(), false
1355 }
1356 return t.counts[i].Load(), true
1357 }
1358
1359 func (th *TimeHistogram) Record(duration int64) {
1360 (*timeHistogram)(th).record(duration)
1361 }
1362
1363 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1364
1365 func SetIntArgRegs(a int) int {
1366 lock(&finlock)
1367 old := intArgRegs
1368 if a >= 0 {
1369 intArgRegs = a
1370 }
1371 unlock(&finlock)
1372 return old
1373 }
1374
1375 func FinalizerGAsleep() bool {
1376 return fingStatus.Load()&fingWait != 0
1377 }
1378
1379
1380
1381
1382 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1383
1384
1385
1386 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1387 return gcTestIsReachable(ptrs...)
1388 }
1389
1390
1391
1392
1393
1394
1395
1396 func GCTestPointerClass(p unsafe.Pointer) string {
1397 return gcTestPointerClass(p)
1398 }
1399
1400 const Raceenabled = raceenabled
1401
1402 const (
1403 GCBackgroundUtilization = gcBackgroundUtilization
1404 GCGoalUtilization = gcGoalUtilization
1405 DefaultHeapMinimum = defaultHeapMinimum
1406 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1407 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1408 )
1409
1410 type GCController struct {
1411 gcControllerState
1412 }
1413
1414 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1415
1416
1417
1418
1419 g := Escape(new(GCController))
1420 g.gcControllerState.test = true
1421 g.init(int32(gcPercent), memoryLimit)
1422 return g
1423 }
1424
1425 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1426 trigger, _ := c.trigger()
1427 if c.heapMarked > trigger {
1428 trigger = c.heapMarked
1429 }
1430 c.maxStackScan.Store(stackSize)
1431 c.globalsScan.Store(globalsSize)
1432 c.heapLive.Store(trigger)
1433 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1434 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1435 }
1436
1437 func (c *GCController) AssistWorkPerByte() float64 {
1438 return c.assistWorkPerByte.Load()
1439 }
1440
1441 func (c *GCController) HeapGoal() uint64 {
1442 return c.heapGoal()
1443 }
1444
1445 func (c *GCController) HeapLive() uint64 {
1446 return c.heapLive.Load()
1447 }
1448
1449 func (c *GCController) HeapMarked() uint64 {
1450 return c.heapMarked
1451 }
1452
1453 func (c *GCController) Triggered() uint64 {
1454 return c.triggered
1455 }
1456
1457 type GCControllerReviseDelta struct {
1458 HeapLive int64
1459 HeapScan int64
1460 HeapScanWork int64
1461 StackScanWork int64
1462 GlobalsScanWork int64
1463 }
1464
1465 func (c *GCController) Revise(d GCControllerReviseDelta) {
1466 c.heapLive.Add(d.HeapLive)
1467 c.heapScan.Add(d.HeapScan)
1468 c.heapScanWork.Add(d.HeapScanWork)
1469 c.stackScanWork.Add(d.StackScanWork)
1470 c.globalsScanWork.Add(d.GlobalsScanWork)
1471 c.revise()
1472 }
1473
1474 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1475 c.assistTime.Store(assistTime)
1476 c.endCycle(elapsed, gomaxprocs, false)
1477 c.resetLive(bytesMarked)
1478 c.commit(false)
1479 }
1480
1481 func (c *GCController) AddIdleMarkWorker() bool {
1482 return c.addIdleMarkWorker()
1483 }
1484
1485 func (c *GCController) NeedIdleMarkWorker() bool {
1486 return c.needIdleMarkWorker()
1487 }
1488
1489 func (c *GCController) RemoveIdleMarkWorker() {
1490 c.removeIdleMarkWorker()
1491 }
1492
1493 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1494 c.setMaxIdleMarkWorkers(max)
1495 }
1496
1497 var alwaysFalse bool
1498 var escapeSink any
1499
1500 func Escape[T any](x T) T {
1501 if alwaysFalse {
1502 escapeSink = x
1503 }
1504 return x
1505 }
1506
1507
1508 func Acquirem() {
1509 acquirem()
1510 }
1511
1512 func Releasem() {
1513 releasem(getg().m)
1514 }
1515
1516
1517
1518
1519
1520
1521 func GoschedIfBusy() {
1522 goschedIfBusy()
1523 }
1524
1525 type PIController struct {
1526 piController
1527 }
1528
1529 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1530 return &PIController{piController{
1531 kp: kp,
1532 ti: ti,
1533 tt: tt,
1534 min: min,
1535 max: max,
1536 }}
1537 }
1538
1539 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1540 return c.piController.next(input, setpoint, period)
1541 }
1542
1543 const (
1544 CapacityPerProc = capacityPerProc
1545 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1546 )
1547
1548 type GCCPULimiter struct {
1549 limiter gcCPULimiterState
1550 }
1551
1552 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1553
1554
1555
1556
1557 l := Escape(new(GCCPULimiter))
1558 l.limiter.test = true
1559 l.limiter.resetCapacity(now, gomaxprocs)
1560 return l
1561 }
1562
1563 func (l *GCCPULimiter) Fill() uint64 {
1564 return l.limiter.bucket.fill
1565 }
1566
1567 func (l *GCCPULimiter) Capacity() uint64 {
1568 return l.limiter.bucket.capacity
1569 }
1570
1571 func (l *GCCPULimiter) Overflow() uint64 {
1572 return l.limiter.overflow
1573 }
1574
1575 func (l *GCCPULimiter) Limiting() bool {
1576 return l.limiter.limiting()
1577 }
1578
1579 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1580 return l.limiter.needUpdate(now)
1581 }
1582
1583 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1584 l.limiter.startGCTransition(enableGC, now)
1585 }
1586
1587 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1588 l.limiter.finishGCTransition(now)
1589 }
1590
1591 func (l *GCCPULimiter) Update(now int64) {
1592 l.limiter.update(now)
1593 }
1594
1595 func (l *GCCPULimiter) AddAssistTime(t int64) {
1596 l.limiter.addAssistTime(t)
1597 }
1598
1599 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1600 l.limiter.resetCapacity(now, nprocs)
1601 }
1602
1603 const ScavengePercent = scavengePercent
1604
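// Scavenger is a test harness that runs the background scavenger loop
// in its own goroutine with the Sleep, Scavenge, ShouldStop, and
// GoMaxProcs hooks stubbed out by the test.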
1605 type Scavenger struct {
1606 Sleep func(int64) int64
1607 Scavenge func(uintptr) (uintptr, int64)
1608 ShouldStop func() bool
1609 GoMaxProcs func() int32
1610
1611 released atomic.Uintptr
1612 scavenger scavengerState
1613 stop chan<- struct{}
1614 done <-chan struct{}
1615 }
1616
1617 func (s *Scavenger) Start() {
1618 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1619 panic("must populate all stubs")
1620 }
1621
1622
1623 s.scavenger.sleepStub = s.Sleep
1624 s.scavenger.scavenge = s.Scavenge
1625 s.scavenger.shouldStop = s.ShouldStop
1626 s.scavenger.gomaxprocs = s.GoMaxProcs
1627
1628
1629 stop := make(chan struct{})
1630 s.stop = stop
1631 done := make(chan struct{})
1632 s.done = done
1633 go func() {
1634
1635 s.scavenger.init()
1636 s.scavenger.park()
1637 for {
1638 select {
1639 case <-stop:
1640 close(done)
1641 return
1642 default:
1643 }
1644 released, workTime := s.scavenger.run()
1645 if released == 0 {
1646 s.scavenger.park()
1647 continue
1648 }
1649 s.released.Add(released)
1650 s.scavenger.sleep(workTime)
1651 }
1652 }()
1653 if !s.BlockUntilParked(1e9 /* 1 second */) {
1654 panic("timed out waiting for scavenger to get ready")
1655 }
1656 }
1657
1658
1659
1660
1661
1662
1663
1664 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1665
1666
1667
1668
1669
1670 start := nanotime()
1671 for nanotime()-start < timeout {
1672 lock(&s.scavenger.lock)
1673 parked := s.scavenger.parked
1674 unlock(&s.scavenger.lock)
1675 if parked {
1676 return true
1677 }
1678 Gosched()
1679 }
1680 return false
1681 }
1682
1683
1684 func (s *Scavenger) Released() uintptr {
1685 return s.released.Load()
1686 }
1687
1688
1689 func (s *Scavenger) Wake() {
1690 s.scavenger.wake()
1691 }
1692
1693
1694
1695 func (s *Scavenger) Stop() {
1696 lock(&s.scavenger.lock)
1697 parked := s.scavenger.parked
1698 unlock(&s.scavenger.lock)
1699 if !parked {
1700 panic("tried to clean up scavenger that is not parked")
1701 }
1702 close(s.stop)
1703 s.Wake()
1704 <-s.done
1705 }
1706
1707 type ScavengeIndex struct {
1708 i scavengeIndex
1709 }
1710
1711 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1712 s := new(ScavengeIndex)
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724 s.i.chunks = make([]atomicScavChunkData, max)
1725 s.i.min.Store(uintptr(min))
1726 s.i.max.Store(uintptr(max))
1727 s.i.minHeapIdx.Store(uintptr(min))
1728 s.i.test = true
1729 return s
1730 }
1731
1732 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1733 ci, off := s.i.find(force)
1734 return ChunkIdx(ci), off
1735 }
1736
1737 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1738 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1739 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1740
1741 if sc == ec {
1742
1743 s.i.alloc(sc, ei+1-si)
1744 } else {
1745
1746 s.i.alloc(sc, pallocChunkPages-si)
1747 for c := sc + 1; c < ec; c++ {
1748 s.i.alloc(c, pallocChunkPages)
1749 }
1750 s.i.alloc(ec, ei+1)
1751 }
1752 }
1753
1754 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1755 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1756 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1757
1758 if sc == ec {
1759
1760 s.i.free(sc, si, ei+1-si)
1761 } else {
1762
1763 s.i.free(sc, si, pallocChunkPages-si)
1764 for c := sc + 1; c < ec; c++ {
1765 s.i.free(c, 0, pallocChunkPages)
1766 }
1767 s.i.free(ec, 0, ei+1)
1768 }
1769 }
1770
1771 func (s *ScavengeIndex) ResetSearchAddrs() {
1772 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1773 addr, marked := a.Load()
1774 if marked {
1775 a.StoreUnmark(addr, addr)
1776 }
1777 a.Clear()
1778 }
1779 s.i.freeHWM = minOffAddr
1780 }
1781
1782 func (s *ScavengeIndex) NextGen() {
1783 s.i.nextGen()
1784 }
1785
1786 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1787 s.i.setEmpty(chunkIdx(ci))
1788 }
1789
1790 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1791 sc0 := scavChunkData{
1792 gen: gen,
1793 inUse: inUse,
1794 lastInUse: lastInUse,
1795 scavChunkFlags: scavChunkFlags(flags),
1796 }
1797 scp := sc0.pack()
1798 sc1 := unpackScavChunkData(scp)
1799 return sc0 == sc1
1800 }
1801
1802 const GTrackingPeriod = gTrackingPeriod
1803
1804 var ZeroBase = unsafe.Pointer(&zerobase)
1805
1806 const UserArenaChunkBytes = userArenaChunkBytes
1807
1808 type UserArena struct {
1809 arena *userArena
1810 }
1811
1812 func NewUserArena() *UserArena {
1813 return &UserArena{newUserArena()}
1814 }
1815
1816 func (a *UserArena) New(out *any) {
1817 i := efaceOf(out)
1818 typ := i._type
1819 if typ.Kind() != abi.Pointer {
1820 panic("new result of non-ptr type")
1821 }
1822 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1823 i.data = a.arena.new(typ)
1824 }
1825
1826 func (a *UserArena) Slice(sl any, cap int) {
1827 a.arena.slice(sl, cap)
1828 }
1829
1830 func (a *UserArena) Free() {
1831 a.arena.free()
1832 }
1833
1834 func GlobalWaitingArenaChunks() int {
1835 n := 0
1836 systemstack(func() {
1837 lock(&mheap_.lock)
1838 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1839 n++
1840 }
1841 unlock(&mheap_.lock)
1842 })
1843 return n
1844 }
1845
1846 func UserArenaClone[T any](s T) T {
1847 return arena_heapify(s).(T)
1848 }
1849
1850 var AlignUp = alignUp
1851
1852 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1853 return blockUntilEmptyFinalizerQueue(timeout)
1854 }
1855
1856 func BlockUntilEmptyCleanupQueue(timeout int64) bool {
1857 return gcCleanups.blockUntilEmpty(timeout)
1858 }
1859
1860 func FrameStartLine(f *Frame) int {
1861 return f.startLine
1862 }
1863
1864
1865
1866 func PersistentAlloc(n, align uintptr) unsafe.Pointer {
1867 return persistentalloc(n, align, &memstats.other_sys)
1868 }
1869
1870 const TagAlign = tagAlign
1871
1872
1873
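// FPCallers unwinds the stack using frame pointers and stores the return
// PCs into pcBuf, returning the number of PCs written.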
1874 func FPCallers(pcBuf []uintptr) int {
1875 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1876 }
1877
1878 const FramePointerEnabled = framepointer_enabled
1879
1880 var (
1881 IsPinned = isPinned
1882 GetPinCounter = pinnerGetPinCounter
1883 )
1884
1885 func SetPinnerLeakPanic(f func()) {
1886 pinnerLeakPanic = f
1887 }
1888 func GetPinnerLeakPanic() func() {
1889 return pinnerLeakPanic
1890 }
1891
1892 var testUintptr uintptr
1893
1894 func MyGenericFunc[T any]() {
1895 systemstack(func() {
1896 testUintptr = 4
1897 })
1898 }
1899
1900 func UnsafePoint(pc uintptr) bool {
1901 fi := findfunc(pc)
1902 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1903 switch v {
1904 case abi.UnsafePointUnsafe:
1905 return true
1906 case abi.UnsafePointSafe:
1907 return false
1908 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1909
1910
1911 return false
1912 default:
1913 var buf [20]byte
1914 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
1915 }
1916 }
1917
1918 type TraceMap struct {
1919 traceMap
1920 }
1921
1922 func (m *TraceMap) PutString(s string) (uint64, bool) {
1923 return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
1924 }
1925
1926 func (m *TraceMap) Reset() {
1927 m.traceMap.reset()
1928 }
1929
1930 func SetSpinInGCMarkDone(spin bool) {
1931 gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
1932 }
1933
1934 func GCMarkDoneRestarted() bool {
1935
1936 mp := acquirem()
1937 if gcphase != _GCoff {
1938 releasem(mp)
1939 return false
1940 }
1941 restarted := gcDebugMarkDone.restartedDueTo27993
1942 releasem(mp)
1943 return restarted
1944 }
1945
1946 func GCMarkDoneResetRestartFlag() {
1947 mp := acquirem()
1948 for gcphase != _GCoff {
1949 releasem(mp)
1950 Gosched()
1951 mp = acquirem()
1952 }
1953 gcDebugMarkDone.restartedDueTo27993 = false
1954 releasem(mp)
1955 }
1956
1957 type BitCursor struct {
1958 b bitCursor
1959 }
1960
1961 func NewBitCursor(buf *byte) BitCursor {
1962 return BitCursor{b: bitCursor{ptr: buf, n: 0}}
1963 }
1964
1965 func (b BitCursor) Write(data *byte, cnt uintptr) {
1966 b.b.write(data, cnt)
1967 }
1968 func (b BitCursor) Offset(cnt uintptr) BitCursor {
1969 return BitCursor{b: b.b.offset(cnt)}
1970 }
1971
1972 const (
1973 BubbleAssocUnbubbled = bubbleAssocUnbubbled
1974 BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
1975 BubbleAssocOtherBubble = bubbleAssocOtherBubble
1976 )
1977
1978 type TraceStackTable traceStackTable
1979
1980 func (t *TraceStackTable) Reset() {
1981 t.tab.reset()
1982 }
1983
1984 func TraceStack(gp *G, tab *TraceStackTable) {
1985 traceStack(0, gp, (*traceStackTable)(tab))
1986 }
1987
1988 var X86HasAVX = &x86HasAVX
1989
1990 var DebugDecorateMappings = &debug.decoratemappings
1991
1992 func SetVMANameSupported() bool { return setVMANameSupported() }
1993
1994 type ListHead struct {
1995 l listHead
1996 }
1997
1998 func (head *ListHead) Init(off uintptr) {
1999 head.l.init(off)
2000 }
2001
2002 type ListNode struct {
2003 l listNode
2004 }
2005
2006 func (head *ListHead) Push(p unsafe.Pointer) {
2007 head.l.push(p)
2008 }
2009
2010 func (head *ListHead) Pop() unsafe.Pointer {
2011 return head.l.pop()
2012 }
2013
2014 func (head *ListHead) Remove(p unsafe.Pointer) {
2015 head.l.remove(p)
2016 }
2017
2018 type ListHeadManual struct {
2019 l listHeadManual
2020 }
2021
2022 func (head *ListHeadManual) Init(off uintptr) {
2023 head.l.init(off)
2024 }
2025
2026 type ListNodeManual struct {
2027 l listNodeManual
2028 }
2029
2030 func (head *ListHeadManual) Push(p unsafe.Pointer) {
2031 head.l.push(p)
2032 }
2033
2034 func (head *ListHeadManual) Pop() unsafe.Pointer {
2035 return head.l.pop()
2036 }
2037
2038 func (head *ListHeadManual) Remove(p unsafe.Pointer) {
2039 head.l.remove(p)
2040 }
2041
2042 func Hexdumper(base uintptr, wordBytes int, mark func(addr uintptr, start func()), data ...[]byte) string {
2043 buf := make([]byte, 0, 2048)
2044 getg().writebuf = buf
2045 h := hexdumper{addr: base, addrBytes: 4, wordBytes: uint8(wordBytes)}
2046 if mark != nil {
2047 h.mark = func(addr uintptr, m hexdumpMarker) {
2048 mark(addr, m.start)
2049 }
2050 }
2051 for _, d := range data {
2052 h.write(d)
2053 }
2054 h.close()
2055 n := len(getg().writebuf)
2056 getg().writebuf = nil
2057 if n == cap(buf) {
2058 panic("Hexdumper buf too small")
2059 }
2060 return string(buf[:n])
2061 }
2062
2063 func HexdumpWords(p, bytes uintptr) string {
2064 buf := make([]byte, 0, 2048)
2065 getg().writebuf = buf
2066 hexdumpWords(p, bytes, nil)
2067 n := len(getg().writebuf)
2068 getg().writebuf = nil
2069 if n == cap(buf) {
2070 panic("HexdumpWords buf too small")
2071 }
2072 return string(buf[:n])
2073 }
2074
2075
2076
2077 func DumpPrintQuoted(s string) string {
2078 gp := getg()
2079 gp.writebuf = make([]byte, 0, 1<<20)
2080 print(quoted(s))
2081 buf := gp.writebuf
2082 gp.writebuf = nil
2083
2084 return string(buf)
2085 }
2086