Source file src/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/goos"
    13  	"internal/runtime/atomic"
    14  	"internal/runtime/gc"
    15  	"internal/runtime/sys"
    16  	"unsafe"
    17  )
    18  
    19  var Fadd64 = fadd64
    20  var Fsub64 = fsub64
    21  var Fmul64 = fmul64
    22  var Fdiv64 = fdiv64
    23  var F64to32 = f64to32
    24  var F32to64 = f32to64
    25  var Fcmp64 = fcmp64
    26  var Fintto64 = fintto64
    27  var F64toint = f64toint
    28  
    29  var Entersyscall = entersyscall
    30  var Exitsyscall = exitsyscall
    31  var LockedOSThread = lockedOSThread
    32  var Xadduintptr = atomic.Xadduintptr
    33  
    34  var ReadRandomFailed = &readRandomFailed
    35  
    36  var Fastlog2 = fastlog2
    37  
    38  var ParseByteCount = parseByteCount
    39  
    40  var Nanotime = nanotime
    41  var Cputicks = cputicks
    42  var CyclesPerSecond = pprof_cyclesPerSecond
    43  var NetpollBreak = netpollBreak
    44  var Usleep = usleep
    45  
    46  var PhysPageSize = physPageSize
    47  var PhysHugePageSize = physHugePageSize
    48  
    49  var NetpollGenericInit = netpollGenericInit
    50  
    51  var Memmove = memmove
    52  var MemclrNoHeapPointers = memclrNoHeapPointers
    53  
    54  var CgoCheckPointer = cgoCheckPointer
    55  
    56  const CrashStackImplemented = crashStackImplemented
    57  
    58  const TracebackInnerFrames = tracebackInnerFrames
    59  const TracebackOuterFrames = tracebackOuterFrames
    60  
    61  var LockPartialOrder = lockPartialOrder
    62  
    63  type TimeTimer = timeTimer
    64  
    65  type LockRank lockRank
    66  
    67  func (l LockRank) String() string {
    68  	return lockRank(l).String()
    69  }
    70  
    71  const PreemptMSupported = preemptMSupported
    72  
    73  type LFNode struct {
    74  	Next    uint64
    75  	Pushcnt uintptr
    76  }
    77  
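        // LFStackPush, LFStackPop, and LFNodeValidate expose the runtime's
        // lock-free stack (lfstack) so tests can push and pop LFNodes and
        // check that node addresses survive lfstack's pointer packing.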
    78  func LFStackPush(head *uint64, node *LFNode) {
    79  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    80  }
    81  
    82  func LFStackPop(head *uint64) *LFNode {
    83  	return (*LFNode)((*lfstack)(head).pop())
    84  }
    85  func LFNodeValidate(node *LFNode) {
    86  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    87  }
    88  
    89  func Netpoll(delta int64) {
    90  	systemstack(func() {
    91  		netpoll(delta)
    92  	})
    93  }
    94  
    95  func PointerMask(x any) (ret []byte) {
    96  	systemstack(func() {
    97  		ret = pointerMask(x)
    98  	})
    99  	return
   100  }
   101  
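        // RunSchedLocalQueueTest exercises a single P's local run queue,
        // pushing and popping batches of goroutines and throwing if the
        // queue's contents or emptiness ever disagree with what was pushed.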
   102  func RunSchedLocalQueueTest() {
   103  	pp := new(p)
   104  	gs := make([]g, len(pp.runq))
   105  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   106  	for i := 0; i < len(pp.runq); i++ {
   107  		if g, _ := runqget(pp); g != nil {
   108  			throw("runq is not empty initially")
   109  		}
   110  		for j := 0; j < i; j++ {
   111  			runqput(pp, &gs[i], false)
   112  		}
   113  		for j := 0; j < i; j++ {
   114  			if g, _ := runqget(pp); g != &gs[i] {
   115  				print("bad element at iter ", i, "/", j, "\n")
   116  				throw("bad element")
   117  			}
   118  		}
   119  		if g, _ := runqget(pp); g != nil {
   120  			throw("runq is not empty afterwards")
   121  		}
   122  	}
   123  }
   124  
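        // RunSchedLocalQueueStealTest fills one P's run queue and steals from
        // it into a second P, throwing if any goroutine is consumed more or
        // less than once, or if the number stolen is not roughly half of what
        // was queued.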
   125  func RunSchedLocalQueueStealTest() {
   126  	p1 := new(p)
   127  	p2 := new(p)
   128  	gs := make([]g, len(p1.runq))
   129  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   130  	for i := 0; i < len(p1.runq); i++ {
   131  		for j := 0; j < i; j++ {
   132  			gs[j].sig = 0
   133  			runqput(p1, &gs[j], false)
   134  		}
   135  		gp := runqsteal(p2, p1, true)
   136  		s := 0
   137  		if gp != nil {
   138  			s++
   139  			gp.sig++
   140  		}
   141  		for {
   142  			gp, _ = runqget(p2)
   143  			if gp == nil {
   144  				break
   145  			}
   146  			s++
   147  			gp.sig++
   148  		}
   149  		for {
   150  			gp, _ = runqget(p1)
   151  			if gp == nil {
   152  				break
   153  			}
   154  			gp.sig++
   155  		}
   156  		for j := 0; j < i; j++ {
   157  			if gs[j].sig != 1 {
   158  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   159  				throw("bad element")
   160  			}
   161  		}
   162  		if s != i/2 && s != i/2+1 {
   163  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   164  			throw("bad steal")
   165  		}
   166  	}
   167  }
   168  
   169  func RunSchedLocalQueueEmptyTest(iters int) {
   170  	// Test that runq is not spuriously reported as empty.
   171  	// Runq emptiness affects scheduling decisions and spurious emptiness
   172  	// can lead to underutilization (both runnable Gs and idle Ps coexist
    173  	// for an arbitrarily long time).
   174  	done := make(chan bool, 1)
   175  	p := new(p)
   176  	gs := make([]g, 2)
   177  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   178  	ready := new(uint32)
   179  	for i := 0; i < iters; i++ {
   180  		*ready = 0
   181  		next0 := (i & 1) == 0
   182  		next1 := (i & 2) == 0
   183  		runqput(p, &gs[0], next0)
   184  		go func() {
   185  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   186  			}
   187  			if runqempty(p) {
   188  				println("next:", next0, next1)
   189  				throw("queue is empty")
   190  			}
   191  			done <- true
   192  		}()
   193  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   194  		}
   195  		runqput(p, &gs[1], next1)
   196  		runqget(p)
   197  		<-done
   198  		runqget(p)
   199  	}
   200  }
   201  
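        // These expose the runtime's internal hash functions so that hash
        // quality and consistency tests can call them directly.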
   202  var (
   203  	StringHash = stringHash
   204  	BytesHash  = bytesHash
   205  	Int32Hash  = int32Hash
   206  	Int64Hash  = int64Hash
   207  	MemHash    = memhash
   208  	MemHash32  = memhash32
   209  	MemHash64  = memhash64
   210  	EfaceHash  = efaceHash
   211  	IfaceHash  = ifaceHash
   212  )
   213  
   214  var UseAeshash = &useAeshash
   215  
   216  func MemclrBytes(b []byte) {
   217  	s := (*slice)(unsafe.Pointer(&b))
   218  	memclrNoHeapPointers(s.array, uintptr(s.len))
   219  }
   220  
   221  const HashLoad = hashLoad
   222  
    223  // GostringW is an entry point for testing gostringw.
   224  func GostringW(w []uint16) (s string) {
   225  	systemstack(func() {
   226  		s = gostringw(&w[0])
   227  	})
   228  	return
   229  }
   230  
   231  var Open = open
   232  var Close = closefd
   233  var Read = read
   234  var Write = write
   235  
   236  func Envs() []string     { return envs }
   237  func SetEnvs(e []string) { envs = e }
   238  
   239  const PtrSize = goarch.PtrSize
   240  
   241  const ClobberdeadPtr = clobberdeadPtr
   242  
   243  func Clobberfree() bool {
   244  	return debug.clobberfree != 0
   245  }
   246  
   247  var ForceGCPeriod = &forcegcperiod
   248  
   249  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   250  // the "environment" traceback level, so later calls to
   251  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   252  func SetTracebackEnv(level string) {
   253  	setTraceback(level)
   254  	traceback_env = traceback_cache
   255  }
   256  
   257  var ReadUnaligned32 = readUnaligned32
   258  var ReadUnaligned64 = readUnaligned64
   259  
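        // CountPagesInUse stops the world and returns the runtime's pagesInUse
        // counter together with an independent count obtained by summing the
        // pages of all in-use spans, so tests can check that the two agree.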
   260  func CountPagesInUse() (pagesInUse, counted uintptr) {
   261  	stw := stopTheWorld(stwForTestCountPagesInUse)
   262  
   263  	pagesInUse = mheap_.pagesInUse.Load()
   264  
   265  	for _, s := range mheap_.allspans {
   266  		if s.state.get() == mSpanInUse {
   267  			counted += s.npages
   268  		}
   269  	}
   270  
   271  	startTheWorld(stw)
   272  
   273  	return
   274  }
   275  
   276  func Blocksampled(cycles, rate int64) bool { return blocksampled(cycles, rate) }
   277  
   278  func Cheaprand() uint32         { return cheaprand() }
   279  func Cheaprand64() int64        { return cheaprand64() }
   280  func Fastrand() uint32          { return uint32(rand()) }
   281  func Fastrand64() uint64        { return rand() }
   282  func Fastrandn(n uint32) uint32 { return randn(n) }
   283  
   284  type ProfBuf profBuf
   285  
   286  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   287  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   288  }
   289  
   290  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   291  	(*profBuf)(p).write(tag, now, hdr, stk)
   292  }
   293  
   294  const (
   295  	ProfBufBlocking    = profBufBlocking
   296  	ProfBufNonBlocking = profBufNonBlocking
   297  )
   298  
   299  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   300  	return (*profBuf)(p).read(mode)
   301  }
   302  
   303  func (p *ProfBuf) Close() {
   304  	(*profBuf)(p).close()
   305  }
   306  
   307  type CPUStats = cpuStats
   308  
   309  func ReadCPUStats() CPUStats {
   310  	return work.cpuStats
   311  }
   312  
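        // ReadMetricsSlow is a test entry point that stops the world and reads
        // runtime metrics and MemStats together under the metrics lock, so the
        // two views can be compared without the stats shifting in between.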
   313  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   314  	stw := stopTheWorld(stwForTestReadMetricsSlow)
   315  
   316  	// Initialize the metrics beforehand because this could
   317  	// allocate and skew the stats.
   318  	metricsLock()
   319  	initMetrics()
   320  
   321  	systemstack(func() {
   322  		// Donate the racectx to g0. readMetricsLocked calls into the race detector
   323  		// via map access.
   324  		getg().racectx = getg().m.curg.racectx
   325  
   326  		// Read the metrics once before in case it allocates and skews the metrics.
   327  		// readMetricsLocked is designed to only allocate the first time it is called
   328  		// with a given slice of samples. In effect, this extra read tests that this
   329  		// remains true, since otherwise the second readMetricsLocked below could
   330  		// allocate before it returns.
   331  		readMetricsLocked(samplesp, len, cap)
   332  
   333  		// Read memstats first. It's going to flush
   334  		// the mcaches which readMetrics does not do, so
   335  		// going the other way around may result in
   336  		// inconsistent statistics.
   337  		readmemstats_m(memStats)
   338  
   339  		// Read metrics again. We need to be sure we're on the
   340  		// system stack with readmemstats_m so that we don't call into
   341  		// the stack allocator and adjust metrics between there and here.
   342  		readMetricsLocked(samplesp, len, cap)
   343  
   344  		// Undo the donation.
   345  		getg().racectx = 0
   346  	})
   347  	metricsUnlock()
   348  
   349  	startTheWorld(stw)
   350  }
   351  
   352  var DoubleCheckReadMemStats = &doubleCheckReadMemStats
   353  
   354  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   355  // MemStats accumulated by scanning the heap.
   356  func ReadMemStatsSlow() (base, slow MemStats) {
   357  	stw := stopTheWorld(stwForTestReadMemStatsSlow)
   358  
   359  	// Run on the system stack to avoid stack growth allocation.
   360  	systemstack(func() {
   361  		// Make sure stats don't change.
   362  		getg().m.mallocing++
   363  
   364  		readmemstats_m(&base)
   365  
   366  		// Initialize slow from base and zero the fields we're
   367  		// recomputing.
   368  		slow = base
   369  		slow.Alloc = 0
   370  		slow.TotalAlloc = 0
   371  		slow.Mallocs = 0
   372  		slow.Frees = 0
   373  		slow.HeapReleased = 0
   374  		var bySize [gc.NumSizeClasses]struct {
   375  			Mallocs, Frees uint64
   376  		}
   377  
   378  		// Add up current allocations in spans.
   379  		for _, s := range mheap_.allspans {
   380  			if s.state.get() != mSpanInUse {
   381  				continue
   382  			}
   383  			if s.isUnusedUserArenaChunk() {
   384  				continue
   385  			}
   386  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   387  				slow.Mallocs++
   388  				slow.Alloc += uint64(s.elemsize)
   389  			} else {
   390  				slow.Mallocs += uint64(s.allocCount)
   391  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   392  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   393  			}
   394  		}
   395  
   396  		// Add in frees by just reading the stats for those directly.
   397  		var m heapStatsDelta
   398  		memstats.heapStats.unsafeRead(&m)
   399  
   400  		// Collect per-sizeclass free stats.
   401  		var smallFree uint64
   402  		for i := 0; i < gc.NumSizeClasses; i++ {
   403  			slow.Frees += m.smallFreeCount[i]
   404  			bySize[i].Frees += m.smallFreeCount[i]
   405  			bySize[i].Mallocs += m.smallFreeCount[i]
   406  			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
   407  		}
   408  		slow.Frees += m.tinyAllocCount + m.largeFreeCount
   409  		slow.Mallocs += slow.Frees
   410  
   411  		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
   412  
   413  		for i := range slow.BySize {
   414  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   415  			slow.BySize[i].Frees = bySize[i].Frees
   416  		}
   417  
   418  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   419  			chunk := mheap_.pages.tryChunkOf(i)
   420  			if chunk == nil {
   421  				continue
   422  			}
   423  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   424  			slow.HeapReleased += uint64(pg) * pageSize
   425  		}
   426  		for _, p := range allp {
   427  			// Only count scav bits for pages in the cache
   428  			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
   429  			slow.HeapReleased += uint64(pg) * pageSize
   430  		}
   431  
   432  		getg().m.mallocing--
   433  	})
   434  
   435  	startTheWorld(stw)
   436  	return
   437  }
   438  
   439  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   440  // and verifies that unwinding the new stack doesn't crash, even if the old
   441  // stack has been freed or reused (simulated via poisoning).
   442  func ShrinkStackAndVerifyFramePointers() {
   443  	before := stackPoisonCopy
   444  	defer func() { stackPoisonCopy = before }()
   445  	stackPoisonCopy = 1
   446  
   447  	gp := getg()
   448  	systemstack(func() {
   449  		shrinkstack(gp)
   450  	})
   451  	// If our new stack contains frame pointers into the old stack, this will
   452  	// crash because the old stack has been poisoned.
   453  	FPCallers(make([]uintptr, 1024))
   454  }
   455  
   456  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   457  // stderr, and blocks in a stack containing
   458  // "runtime.blockOnSystemStackInternal".
   459  func BlockOnSystemStack() {
   460  	systemstack(blockOnSystemStackInternal)
   461  }
   462  
   463  func blockOnSystemStackInternal() {
   464  	print("x\n")
   465  	lock(&deadlock)
   466  	lock(&deadlock)
   467  }
   468  
   469  type RWMutex struct {
   470  	rw rwmutex
   471  }
   472  
   473  func (rw *RWMutex) Init() {
   474  	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
   475  }
   476  
   477  func (rw *RWMutex) RLock() {
   478  	rw.rw.rlock()
   479  }
   480  
   481  func (rw *RWMutex) RUnlock() {
   482  	rw.rw.runlock()
   483  }
   484  
   485  func (rw *RWMutex) Lock() {
   486  	rw.rw.lock()
   487  }
   488  
   489  func (rw *RWMutex) Unlock() {
   490  	rw.rw.unlock()
   491  }
   492  
   493  func LockOSCounts() (external, internal uint32) {
   494  	gp := getg()
   495  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   496  		if gp.lockedm != 0 {
   497  			panic("lockedm on non-locked goroutine")
   498  		}
   499  	} else {
   500  		if gp.lockedm == 0 {
   501  			panic("nil lockedm on locked goroutine")
   502  		}
   503  	}
   504  	return gp.m.lockedExt, gp.m.lockedInt
   505  }
   506  
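        // TracebackSystemstack nests i systemstack calls and then records a
        // traceback from the innermost one into stk, returning the number of
        // PCs recorded. It exercises unwinding across system-stack switches.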
   507  //go:noinline
   508  func TracebackSystemstack(stk []uintptr, i int) int {
   509  	if i == 0 {
   510  		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
   511  		var u unwinder
   512  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   513  		return tracebackPCs(&u, 0, stk)
   514  	}
   515  	n := 0
   516  	systemstack(func() {
   517  		n = TracebackSystemstack(stk, i-1)
   518  	})
   519  	return n
   520  }
   521  
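        // KeepNArenaHints truncates mheap_.arenaHints to at most n entries,
        // limiting how many preferred growth addresses remain for the heap.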
   522  func KeepNArenaHints(n int) {
   523  	hint := mheap_.arenaHints
   524  	for i := 1; i < n; i++ {
   525  		hint = hint.next
   526  		if hint == nil {
   527  			return
   528  		}
   529  	}
   530  	hint.next = nil
   531  }
   532  
   533  // MapNextArenaHint reserves a page at the next arena growth hint,
   534  // preventing the arena from growing there, and returns the range of
   535  // addresses that are no longer viable.
   536  //
   537  // This may fail to reserve memory. If it fails, it still returns the
   538  // address range it attempted to reserve.
   539  func MapNextArenaHint() (start, end uintptr, ok bool) {
   540  	hint := mheap_.arenaHints
   541  	addr := hint.addr
   542  	if hint.down {
   543  		start, end = addr-heapArenaBytes, addr
   544  		addr -= physPageSize
   545  	} else {
   546  		start, end = addr, addr+heapArenaBytes
   547  	}
   548  	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
   549  	ok = (addr == uintptr(got))
   550  	if !ok {
   551  		// We were unable to get the requested reservation.
   552  		// Release what we did get and fail.
   553  		sysFreeOS(got, physPageSize)
   554  	}
   555  	return
   556  }
   557  
   558  func NextArenaHint() (uintptr, bool) {
   559  	if mheap_.arenaHints == nil {
   560  		return 0, false
   561  	}
   562  	return mheap_.arenaHints.addr, true
   563  }
   564  
   565  type G = g
   566  
   567  type Sudog = sudog
   568  
   569  type XRegPerG = xRegPerG
   570  
   571  func Getg() *G {
   572  	return getg()
   573  }
   574  
   575  func Goid() uint64 {
   576  	return getg().goid
   577  }
   578  
   579  func GIsWaitingOnMutex(gp *G) bool {
   580  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   581  }
   582  
   583  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   584  
   585  //go:noinline
   586  func PanicForTesting(b []byte, i int) byte {
   587  	return unexportedPanicForTesting(b, i)
   588  }
   589  
   590  //go:noinline
   591  func unexportedPanicForTesting(b []byte, i int) byte {
   592  	return b[i]
   593  }
   594  
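        // G0StackOverflow artificially lowers the recorded g0 stack bounds and
        // then recurses via stackOverflow, forcing a stack overflow on the
        // system stack so tests can observe how the runtime reports it.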
   595  func G0StackOverflow() {
   596  	systemstack(func() {
   597  		g0 := getg()
   598  		sp := sys.GetCallerSP()
    599  		// The stack bounds for the g0 stack are not always precise.
    600  		// Use an artificially small stack, to trigger a stack overflow
    601  		// without actually running out of the system stack (which may segfault).
   602  		g0.stack.lo = sp - 4096 - stackSystem
   603  		g0.stackguard0 = g0.stack.lo + stackGuard
   604  		g0.stackguard1 = g0.stackguard0
   605  
   606  		stackOverflow(nil)
   607  	})
   608  }
   609  
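        // stackOverflow recurses without bound, consuming a 256-byte buffer of
        // stack per frame, until the artificially lowered g0 stack bound set
        // above is exceeded.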
   610  func stackOverflow(x *byte) {
   611  	var buf [256]byte
   612  	stackOverflow(&buf[0])
   613  }
   614  
   615  func RunGetgThreadSwitchTest() {
   616  	// Test that getg works correctly with thread switch.
   617  	// With gccgo, if we generate getg inlined, the backend
   618  	// may cache the address of the TLS variable, which
   619  	// will become invalid after a thread switch. This test
   620  	// checks that the bad caching doesn't happen.
   621  
   622  	ch := make(chan int)
   623  	go func(ch chan int) {
   624  		ch <- 5
   625  		LockOSThread()
   626  	}(ch)
   627  
   628  	g1 := getg()
   629  
   630  	// Block on a receive. This is likely to get us a thread
   631  	// switch. If we yield to the sender goroutine, it will
   632  	// lock the thread, forcing us to resume on a different
   633  	// thread.
   634  	<-ch
   635  
   636  	g2 := getg()
   637  	if g1 != g2 {
   638  		panic("g1 != g2")
   639  	}
   640  
   641  	// Also test getg after some control flow, as the
   642  	// backend is sensitive to control flow.
   643  	g3 := getg()
   644  	if g1 != g3 {
   645  		panic("g1 != g3")
   646  	}
   647  }
   648  
   649  // Expose freegc for testing.
   650  func Freegc(p unsafe.Pointer, size uintptr, noscan bool) {
   651  	freegc(p, size, noscan)
   652  }
   653  
   654  // Expose gcAssistBytes for the current g for testing.
   655  func AssistCredit() int64 {
   656  	assistG := getg()
   657  	if assistG.m.curg != nil {
   658  		assistG = assistG.m.curg
   659  	}
   660  	return assistG.gcAssistBytes
   661  }
   662  
   663  // Expose gcBlackenEnabled for testing.
   664  func GcBlackenEnable() bool {
   665  	// Note we do a non-atomic load here.
   666  	// Some checks against gcBlackenEnabled (e.g., in mallocgc)
   667  	// are currently done via non-atomic load for performance reasons,
   668  	// but other checks are done via atomic load (e.g., in mgcmark.go),
   669  	// so interpreting this value in a test may be subtle.
   670  	return gcBlackenEnabled != 0
   671  }
   672  
   673  const SizeSpecializedMallocEnabled = sizeSpecializedMallocEnabled
   674  
   675  const RuntimeFreegcEnabled = runtimeFreegcEnabled
   676  
   677  const (
   678  	PageSize         = pageSize
   679  	PallocChunkPages = pallocChunkPages
   680  	PageAlloc64Bit   = pageAlloc64Bit
   681  	PallocSumBytes   = pallocSumBytes
   682  )
   683  
   684  // Expose pallocSum for testing.
   685  type PallocSum pallocSum
   686  
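        // A packed sum round-trips its fields; for example,
        // PackPallocSum(1, 3, 2).Start() == 1, .Max() == 3, and .End() == 2.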
   687  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   688  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   689  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   690  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
   691  
   692  // Expose pallocBits for testing.
   693  type PallocBits pallocBits
   694  
   695  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   696  	return (*pallocBits)(b).find(npages, searchIdx)
   697  }
   698  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   699  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   700  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   701  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   702  
   703  // SummarizeSlow is a slow but more obviously correct implementation
   704  // of (*pallocBits).summarize. Used for testing.
   705  func SummarizeSlow(b *PallocBits) PallocSum {
   706  	var start, most, end uint
   707  
   708  	const N = uint(len(b)) * 64
   709  	for start < N && (*pageBits)(b).get(start) == 0 {
   710  		start++
   711  	}
   712  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   713  		end++
   714  	}
   715  	run := uint(0)
   716  	for i := uint(0); i < N; i++ {
   717  		if (*pageBits)(b).get(i) == 0 {
   718  			run++
   719  		} else {
   720  			run = 0
   721  		}
   722  		most = max(most, run)
   723  	}
   724  	return PackPallocSum(start, most, end)
   725  }
   726  
   727  // Expose non-trivial helpers for testing.
   728  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   729  
    730  // DiffPallocBits returns the set of bit ranges over which the two
    731  // PallocBits a and b differ.
   732  func DiffPallocBits(a, b *PallocBits) []BitRange {
   733  	ba := (*pageBits)(a)
   734  	bb := (*pageBits)(b)
   735  
   736  	var d []BitRange
   737  	base, size := uint(0), uint(0)
   738  	for i := uint(0); i < uint(len(ba))*64; i++ {
   739  		if ba.get(i) != bb.get(i) {
   740  			if size == 0 {
   741  				base = i
   742  			}
   743  			size++
   744  		} else {
   745  			if size != 0 {
   746  				d = append(d, BitRange{base, size})
   747  			}
   748  			size = 0
   749  		}
   750  	}
   751  	if size != 0 {
   752  		d = append(d, BitRange{base, size})
   753  	}
   754  	return d
   755  }
   756  
   757  // StringifyPallocBits gets the bits in the bit range r from b,
   758  // and returns a string containing the bits as ASCII 0 and 1
   759  // characters.
   760  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   761  	str := ""
   762  	for j := r.I; j < r.I+r.N; j++ {
   763  		if (*pageBits)(b).get(j) != 0 {
   764  			str += "1"
   765  		} else {
   766  			str += "0"
   767  		}
   768  	}
   769  	return str
   770  }
   771  
   772  // Expose pallocData for testing.
   773  type PallocData pallocData
   774  
   775  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   776  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   777  }
   778  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   779  func (d *PallocData) ScavengedSetRange(i, n uint) {
   780  	(*pallocData)(d).scavenged.setRange(i, n)
   781  }
   782  func (d *PallocData) PallocBits() *PallocBits {
   783  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   784  }
   785  func (d *PallocData) Scavenged() *PallocBits {
   786  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   787  }
   788  
   789  // Expose fillAligned for testing.
   790  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   791  
   792  // Expose pageCache for testing.
   793  type PageCache pageCache
   794  
   795  const PageCachePages = pageCachePages
   796  
   797  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   798  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   799  }
   800  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   801  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   802  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   803  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   804  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   805  	return (*pageCache)(c).alloc(npages)
   806  }
   807  func (c *PageCache) Flush(s *PageAlloc) {
   808  	cp := (*pageCache)(c)
   809  	sp := (*pageAlloc)(s)
   810  
   811  	systemstack(func() {
   812  		// None of the tests need any higher-level locking, so we just
   813  		// take the lock internally.
   814  		lock(sp.mheapLock)
   815  		cp.flush(sp)
   816  		unlock(sp.mheapLock)
   817  	})
   818  }
   819  
   820  // Expose chunk index type.
   821  type ChunkIdx chunkIdx
   822  
    823  // Expose pageAlloc for testing. Note that pageAlloc is not in the heap,
    824  // so neither is PageAlloc.
   825  type PageAlloc pageAlloc
   826  
   827  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   828  	pp := (*pageAlloc)(p)
   829  
   830  	var addr, scav uintptr
   831  	systemstack(func() {
   832  		// None of the tests need any higher-level locking, so we just
   833  		// take the lock internally.
   834  		lock(pp.mheapLock)
   835  		addr, scav = pp.alloc(npages)
   836  		unlock(pp.mheapLock)
   837  	})
   838  	return addr, scav
   839  }
   840  func (p *PageAlloc) AllocToCache() PageCache {
   841  	pp := (*pageAlloc)(p)
   842  
   843  	var c PageCache
   844  	systemstack(func() {
   845  		// None of the tests need any higher-level locking, so we just
   846  		// take the lock internally.
   847  		lock(pp.mheapLock)
   848  		c = PageCache(pp.allocToCache())
   849  		unlock(pp.mheapLock)
   850  	})
   851  	return c
   852  }
   853  func (p *PageAlloc) Free(base, npages uintptr) {
   854  	pp := (*pageAlloc)(p)
   855  
   856  	systemstack(func() {
   857  		// None of the tests need any higher-level locking, so we just
   858  		// take the lock internally.
   859  		lock(pp.mheapLock)
   860  		pp.free(base, npages)
   861  		unlock(pp.mheapLock)
   862  	})
   863  }
   864  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   865  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   866  }
   867  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   868  	pp := (*pageAlloc)(p)
   869  	systemstack(func() {
   870  		r = pp.scavenge(nbytes, nil, true)
   871  	})
   872  	return
   873  }
   874  func (p *PageAlloc) InUse() []AddrRange {
   875  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   876  	for _, r := range p.inUse.ranges {
   877  		ranges = append(ranges, AddrRange{r})
   878  	}
   879  	return ranges
   880  }
   881  
    882  // PallocData returns the PallocData for chunk i, or nil if the chunk's L2 mapping is missing.
   883  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   884  	ci := chunkIdx(i)
   885  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   886  }
   887  
   888  // AddrRange is a wrapper around addrRange for testing.
   889  type AddrRange struct {
   890  	addrRange
   891  }
   892  
   893  // MakeAddrRange creates a new address range.
   894  func MakeAddrRange(base, limit uintptr) AddrRange {
   895  	return AddrRange{makeAddrRange(base, limit)}
   896  }
   897  
   898  // Base returns the virtual base address of the address range.
   899  func (a AddrRange) Base() uintptr {
   900  	return a.addrRange.base.addr()
   901  }
   902  
    903  // Limit returns the virtual address of the limit of the address range.
   904  func (a AddrRange) Limit() uintptr {
   905  	return a.addrRange.limit.addr()
   906  }
   907  
   908  // Equals returns true if the two address ranges are exactly equal.
   909  func (a AddrRange) Equals(b AddrRange) bool {
   910  	return a == b
   911  }
   912  
   913  // Size returns the size in bytes of the address range.
   914  func (a AddrRange) Size() uintptr {
   915  	return a.addrRange.size()
   916  }
   917  
   918  // testSysStat is the sysStat passed to test versions of various
    919  // runtime structures. We have to keep track of it because otherwise
    920  // memstats.mappedReady won't line up with other stats in the runtime
    921  // during tests.
   922  var testSysStat = &memstats.other_sys
   923  
   924  // AddrRanges is a wrapper around addrRanges for testing.
   925  type AddrRanges struct {
   926  	addrRanges
   927  	mutable bool
   928  }
   929  
   930  // NewAddrRanges creates a new empty addrRanges.
   931  //
   932  // Note that this initializes addrRanges just like in the
   933  // runtime, so its memory is persistentalloc'd. Call this
   934  // function sparingly since the memory it allocates is
   935  // leaked.
   936  //
   937  // This AddrRanges is mutable, so we can test methods like
   938  // Add.
   939  func NewAddrRanges() AddrRanges {
   940  	r := addrRanges{}
   941  	r.init(testSysStat)
   942  	return AddrRanges{r, true}
   943  }
   944  
   945  // MakeAddrRanges creates a new addrRanges populated with
   946  // the ranges in a.
   947  //
   948  // The returned AddrRanges is immutable, so methods like
   949  // Add will fail.
   950  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   951  	// Methods that manipulate the backing store of addrRanges.ranges should
   952  	// not be used on the result from this function (e.g. add) since they may
   953  	// trigger reallocation. That would normally be fine, except the new
   954  	// backing store won't come from the heap, but from persistentalloc, so
   955  	// we'll leak some memory implicitly.
   956  	ranges := make([]addrRange, 0, len(a))
   957  	total := uintptr(0)
   958  	for _, r := range a {
   959  		ranges = append(ranges, r.addrRange)
   960  		total += r.Size()
   961  	}
   962  	return AddrRanges{addrRanges{
   963  		ranges:     ranges,
   964  		totalBytes: total,
   965  		sysStat:    testSysStat,
   966  	}, false}
   967  }
   968  
   969  // Ranges returns a copy of the ranges described by the
   970  // addrRanges.
   971  func (a *AddrRanges) Ranges() []AddrRange {
   972  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   973  	for _, r := range a.addrRanges.ranges {
   974  		result = append(result, AddrRange{r})
   975  	}
   976  	return result
   977  }
   978  
   979  // FindSucc returns the successor to base. See addrRanges.findSucc
   980  // for more details.
   981  func (a *AddrRanges) FindSucc(base uintptr) int {
   982  	return a.findSucc(base)
   983  }
   984  
   985  // Add adds a new AddrRange to the AddrRanges.
   986  //
   987  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
   988  // otherwise this method will throw.
   989  func (a *AddrRanges) Add(r AddrRange) {
   990  	if !a.mutable {
   991  		throw("attempt to mutate immutable AddrRanges")
   992  	}
   993  	a.add(r.addrRange)
   994  }
   995  
   996  // TotalBytes returns the totalBytes field of the addrRanges.
   997  func (a *AddrRanges) TotalBytes() uintptr {
   998  	return a.addrRanges.totalBytes
   999  }
  1000  
  1001  // BitRange represents a range over a bitmap.
  1002  type BitRange struct {
  1003  	I, N uint // bit index and length in bits
  1004  }
  1005  
  1006  // NewPageAlloc creates a new page allocator for testing and
  1007  // initializes it with the scav and chunks maps. Each key in these maps
  1008  // represents a chunk index and each value is a series of bit ranges to
  1009  // set within each bitmap's chunk.
  1010  //
  1011  // The initialization of the pageAlloc preserves the invariant that if a
  1012  // scavenged bit is set the alloc bit is necessarily unset, so some
  1013  // of the bits described by scav may be cleared in the final bitmap if
  1014  // ranges in chunks overlap with them.
  1015  //
  1016  // scav is optional, and if nil, the scavenged bitmap will be cleared
  1017  // (as opposed to all 1s, which it usually is). Furthermore, every
  1018  // chunk index in scav must appear in chunks; ones that do not are
  1019  // ignored.
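        //
        // A minimal call from a test might look like this (illustrative chunk
        // index and bit ranges; FreePageAlloc releases the OS memory):
        //
        //	p := NewPageAlloc(map[ChunkIdx][]BitRange{
        //		BaseChunkIdx: {{0, PallocChunkPages}},
        //	}, nil)
        //	defer FreePageAlloc(p)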
  1020  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
  1021  	p := new(pageAlloc)
  1022  
  1023  	// We've got an entry, so initialize the pageAlloc.
  1024  	p.init(new(mutex), testSysStat, true)
  1025  	lockInit(p.mheapLock, lockRankMheap)
  1026  	for i, init := range chunks {
  1027  		addr := chunkBase(chunkIdx(i))
  1028  
  1029  		// Mark the chunk's existence in the pageAlloc.
  1030  		systemstack(func() {
  1031  			lock(p.mheapLock)
  1032  			p.grow(addr, pallocChunkBytes)
  1033  			unlock(p.mheapLock)
  1034  		})
  1035  
  1036  		// Initialize the bitmap and update pageAlloc metadata.
  1037  		ci := chunkIndex(addr)
  1038  		chunk := p.chunkOf(ci)
  1039  
   1040  		// Clear all the scavenged bits, which grow sets.
  1041  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1042  
  1043  		// Simulate the allocation and subsequent free of all pages in
   1044  		// the chunk for the scavenge index. This leaves the index in a state
   1045  		// equivalent to all pages within the chunk being free.
  1046  		p.scav.index.alloc(ci, pallocChunkPages)
  1047  		p.scav.index.free(ci, 0, pallocChunkPages)
  1048  
  1049  		// Apply scavenge state if applicable.
  1050  		if scav != nil {
  1051  			if scvg, ok := scav[i]; ok {
  1052  				for _, s := range scvg {
  1053  					// Ignore the case of s.N == 0. setRange doesn't handle
  1054  					// it and it's a no-op anyway.
  1055  					if s.N != 0 {
  1056  						chunk.scavenged.setRange(s.I, s.N)
  1057  					}
  1058  				}
  1059  			}
  1060  		}
  1061  
  1062  		// Apply alloc state.
  1063  		for _, s := range init {
  1064  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1065  			// it and it's a no-op anyway.
  1066  			if s.N != 0 {
  1067  				chunk.allocRange(s.I, s.N)
  1068  
  1069  				// Make sure the scavenge index is updated.
  1070  				p.scav.index.alloc(ci, s.N)
  1071  			}
  1072  		}
  1073  
  1074  		// Update heap metadata for the allocRange calls above.
  1075  		systemstack(func() {
  1076  			lock(p.mheapLock)
  1077  			p.update(addr, pallocChunkPages, false, false)
  1078  			unlock(p.mheapLock)
  1079  		})
  1080  	}
  1081  
  1082  	return (*PageAlloc)(p)
  1083  }
  1084  
  1085  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1086  // is called the pageAlloc may no longer be used. The object itself will be
  1087  // collected by the garbage collector once it is no longer live.
  1088  func FreePageAlloc(pp *PageAlloc) {
  1089  	p := (*pageAlloc)(pp)
  1090  
  1091  	// Free all the mapped space for the summary levels.
  1092  	if pageAlloc64Bit != 0 {
  1093  		for l := 0; l < summaryLevels; l++ {
  1094  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1095  		}
  1096  	} else {
  1097  		resSize := uintptr(0)
  1098  		for _, s := range p.summary {
  1099  			resSize += uintptr(cap(s)) * pallocSumBytes
  1100  		}
  1101  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1102  	}
  1103  
  1104  	// Free extra data structures.
  1105  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1106  
  1107  	// Subtract back out whatever we mapped for the summaries.
  1108  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1109  	// (and in anger should actually be accounted for), and there's no other
  1110  	// way to figure out how much we actually mapped.
  1111  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1112  	testSysStat.add(-int64(p.summaryMappedReady))
  1113  
  1114  	// Free the mapped space for chunks.
  1115  	for i := range p.chunks {
  1116  		if x := p.chunks[i]; x != nil {
  1117  			p.chunks[i] = nil
  1118  			// This memory comes from sysAlloc and will always be page-aligned.
  1119  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1120  		}
  1121  	}
  1122  }
  1123  
  1124  // BaseChunkIdx is a convenient chunkIdx value which works on both
   1125  // 64-bit and 32-bit platforms, allowing the tests to share code
  1126  // between the two.
  1127  //
  1128  // This should not be higher than 0x100*pallocChunkBytes to support
  1129  // mips and mipsle, which only have 31-bit address spaces.
  1130  var BaseChunkIdx = func() ChunkIdx {
  1131  	var prefix uintptr
  1132  	if pageAlloc64Bit != 0 {
  1133  		prefix = 0xc000
  1134  	} else {
  1135  		prefix = 0x100
  1136  	}
  1137  	baseAddr := prefix * pallocChunkBytes
  1138  	if goos.IsAix != 0 {
  1139  		baseAddr += arenaBaseOffset
  1140  	}
  1141  	return ChunkIdx(chunkIndex(baseAddr))
  1142  }()
  1143  
  1144  // PageBase returns an address given a chunk index and a page index
  1145  // relative to that chunk.
  1146  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1147  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1148  }
  1149  
  1150  type BitsMismatch struct {
  1151  	Base      uintptr
  1152  	Got, Want uint64
  1153  }
  1154  
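        // CheckScavengedBitsCleared scans the page allocator's chunks for
        // pages that are marked both allocated and scavenged, which should
        // never happen. It records up to len(mismatches) offending bitmap
        // words and reports how many were found and whether the check passed.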
  1155  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1156  	ok = true
  1157  
  1158  	// Run on the system stack to avoid stack growth allocation.
  1159  	systemstack(func() {
  1160  		getg().m.mallocing++
  1161  
  1162  		// Lock so that we can safely access the bitmap.
  1163  		lock(&mheap_.lock)
  1164  
  1165  	chunkLoop:
  1166  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1167  			chunk := mheap_.pages.tryChunkOf(i)
  1168  			if chunk == nil {
  1169  				continue
  1170  			}
  1171  			cb := chunkBase(i)
  1172  			for j := 0; j < pallocChunkPages/64; j++ {
  1173  				// Run over each 64-bit bitmap section and ensure
  1174  				// scavenged is being cleared properly on allocation.
  1175  				// If a used bit and scavenged bit are both set, that's
  1176  				// an error, and could indicate a larger problem, or
  1177  				// an accounting problem.
  1178  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1179  				got := chunk.scavenged[j]
  1180  				if want != got {
  1181  					ok = false
  1182  					if n >= len(mismatches) {
  1183  						break chunkLoop
  1184  					}
  1185  					mismatches[n] = BitsMismatch{
  1186  						Base: cb + uintptr(j)*64*pageSize,
  1187  						Got:  got,
  1188  						Want: want,
  1189  					}
  1190  					n++
  1191  				}
  1192  			}
  1193  		}
  1194  		unlock(&mheap_.lock)
  1195  
  1196  		getg().m.mallocing--
  1197  	})
  1198  
  1199  	if randomizeHeapBase && len(mismatches) > 0 {
   1200  		// When goexperiment.RandomizedHeapBase64 is set, we use a series of
   1201  		// padding pages to generate a randomized heap base address, and those
   1202  		// pages have both the alloc and scav bits set. Because of this we
   1203  		// expect exactly one arena to have mismatches, so check for that
   1204  		// explicitly and remove the mismatches if that property holds. If we
   1205  		// see more than one arena with mismatches, that is an indication
   1206  		// something has actually gone wrong, so return the mismatches.
  1207  		//
  1208  		// We do this, instead of ignoring the mismatches in the chunkLoop, because
  1209  		// it's not easy to determine which arena we added the padding pages to
  1210  		// programmatically, without explicitly recording the base address somewhere
  1211  		// in a global variable (which we'd rather not do as the address of that variable
  1212  		// is likely to be somewhat predictable, potentially defeating the purpose
  1213  		// of our randomization).
  1214  		affectedArenas := map[arenaIdx]bool{}
  1215  		for _, mismatch := range mismatches {
  1216  			if mismatch.Base > 0 {
  1217  				affectedArenas[arenaIndex(mismatch.Base)] = true
  1218  			}
  1219  		}
  1220  		if len(affectedArenas) == 1 {
  1221  			ok = true
  1222  			// zero the mismatches
  1223  			for i := range n {
  1224  				mismatches[i] = BitsMismatch{}
  1225  			}
  1226  		}
  1227  	}
  1228  
  1229  	return
  1230  }
  1231  
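        // PageCachePagesLeaked stops the world and counts pages still sitting
        // in the page caches of destroyed Ps, i.e. pages that were never
        // flushed back to the page allocator.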
  1232  func PageCachePagesLeaked() (leaked uintptr) {
  1233  	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
  1234  
  1235  	// Walk over destroyed Ps and look for unflushed caches.
  1236  	deadp := allp[len(allp):cap(allp)]
  1237  	for _, p := range deadp {
  1238  		// Since we're going past len(allp) we may see nil Ps.
  1239  		// Just ignore them.
  1240  		if p != nil {
  1241  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1242  		}
  1243  	}
  1244  
  1245  	startTheWorld(stw)
  1246  	return
  1247  }
  1248  
  1249  var ProcYield = procyield
  1250  var OSYield = osyield
  1251  
  1252  type Mutex = mutex
  1253  
  1254  var Lock = lock
  1255  var Unlock = unlock
  1256  
  1257  var MutexContended = mutexContended
  1258  
  1259  func SemRootLock(addr *uint32) *mutex {
  1260  	root := semtable.rootFor(addr)
  1261  	return &root.lock
  1262  }
  1263  
  1264  var Semacquire = semacquire
  1265  var Semrelease1 = semrelease1
  1266  
  1267  func SemNwait(addr *uint32) uint32 {
  1268  	root := semtable.rootFor(addr)
  1269  	return root.nwait.Load()
  1270  }
  1271  
  1272  const SemTableSize = semTabSize
  1273  
  1274  // SemTable is a wrapper around semTable exported for testing.
  1275  type SemTable struct {
  1276  	semTable
  1277  }
  1278  
  1279  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1280  func (t *SemTable) Enqueue(addr *uint32) {
  1281  	s := acquireSudog()
  1282  	s.releasetime = 0
  1283  	s.acquiretime = 0
  1284  	s.ticket = 0
  1285  	t.semTable.rootFor(addr).queue(addr, s, false)
  1286  }
  1287  
  1288  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1289  //
  1290  // Returns true if there actually was a waiter to be dequeued.
  1291  func (t *SemTable) Dequeue(addr *uint32) bool {
  1292  	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
  1293  	if s != nil {
  1294  		releaseSudog(s)
  1295  		return true
  1296  	}
  1297  	return false
  1298  }
  1299  
  1300  // mspan wrapper for testing.
  1301  type MSpan mspan
  1302  
  1303  // Allocate an mspan for testing.
  1304  func AllocMSpan() *MSpan {
  1305  	var s *mspan
  1306  	systemstack(func() {
  1307  		lock(&mheap_.lock)
  1308  		s = (*mspan)(mheap_.spanalloc.alloc())
  1309  		s.init(0, 0)
  1310  		unlock(&mheap_.lock)
  1311  	})
  1312  	return (*MSpan)(s)
  1313  }
  1314  
  1315  // Free an allocated mspan.
  1316  func FreeMSpan(s *MSpan) {
  1317  	systemstack(func() {
  1318  		lock(&mheap_.lock)
  1319  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1320  		unlock(&mheap_.lock)
  1321  	})
  1322  }
  1323  
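        // MSpanCountAlloc temporarily installs bits as the span's gcmarkBits
        // (treating len(bits)*8 as the element count) and returns the number
        // of set bits reported by countAlloc.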
  1324  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1325  	s := (*mspan)(ms)
  1326  	s.nelems = uint16(len(bits) * 8)
  1327  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1328  	result := s.countAlloc()
  1329  	s.gcmarkBits = nil
  1330  	return result
  1331  }
  1332  
  1333  const (
  1334  	TimeHistSubBucketBits = timeHistSubBucketBits
  1335  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1336  	TimeHistNumBuckets    = timeHistNumBuckets
  1337  	TimeHistMinBucketBits = timeHistMinBucketBits
  1338  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1339  )
  1340  
  1341  type TimeHistogram timeHistogram
  1342  
   1343  // Count returns the count for the given bucket and subBucket indices.
   1344  // It returns true if the indices were valid; otherwise it returns the
   1345  // count for the underflow bucket if bucket < 0, or for the overflow
   1346  // bucket if the index is past the end of the histogram, and false.
  1347  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1348  	t := (*timeHistogram)(th)
  1349  	if bucket < 0 {
  1350  		return t.underflow.Load(), false
  1351  	}
  1352  	i := bucket*TimeHistNumSubBuckets + subBucket
  1353  	if i >= len(t.counts) {
  1354  		return t.overflow.Load(), false
  1355  	}
  1356  	return t.counts[i].Load(), true
  1357  }
  1358  
  1359  func (th *TimeHistogram) Record(duration int64) {
  1360  	(*timeHistogram)(th).record(duration)
  1361  }
  1362  
  1363  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1364  
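        // SetIntArgRegs sets intArgRegs, the runtime's count of integer ABI
        // argument registers, under finlock and returns the previous value.
        // A negative a only reads the current value without changing it.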
  1365  func SetIntArgRegs(a int) int {
  1366  	lock(&finlock)
  1367  	old := intArgRegs
  1368  	if a >= 0 {
  1369  		intArgRegs = a
  1370  	}
  1371  	unlock(&finlock)
  1372  	return old
  1373  }
  1374  
  1375  func FinalizerGAsleep() bool {
  1376  	return fingStatus.Load()&fingWait != 0
  1377  }
  1378  
  1379  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1380  // extra layer of call, since then there's a return before the "real"
  1381  // next call.
  1382  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1383  
  1384  // For GCTestIsReachable, it's important that we do this as a call so
  1385  // escape analysis can see through it.
  1386  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1387  	return gcTestIsReachable(ptrs...)
  1388  }
  1389  
  1390  // For GCTestPointerClass, it's important that we do this as a call so
  1391  // escape analysis can see through it.
  1392  //
  1393  // This is nosplit because gcTestPointerClass is.
  1394  //
  1395  //go:nosplit
  1396  func GCTestPointerClass(p unsafe.Pointer) string {
  1397  	return gcTestPointerClass(p)
  1398  }
  1399  
  1400  const Raceenabled = raceenabled
  1401  
  1402  const (
  1403  	GCBackgroundUtilization            = gcBackgroundUtilization
  1404  	GCGoalUtilization                  = gcGoalUtilization
  1405  	DefaultHeapMinimum                 = defaultHeapMinimum
  1406  	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
  1407  	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
  1408  )
  1409  
  1410  type GCController struct {
  1411  	gcControllerState
  1412  }
  1413  
  1414  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1415  	// Force the controller to escape. We're going to
  1416  	// do 64-bit atomics on it, and if it gets stack-allocated
  1417  	// on a 32-bit architecture, it may get allocated unaligned
  1418  	// space.
  1419  	g := Escape(new(GCController))
  1420  	g.gcControllerState.test = true // Mark it as a test copy.
  1421  	g.init(int32(gcPercent), memoryLimit)
  1422  	return g
  1423  }
  1424  
  1425  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1426  	trigger, _ := c.trigger()
  1427  	if c.heapMarked > trigger {
  1428  		trigger = c.heapMarked
  1429  	}
  1430  	c.maxStackScan.Store(stackSize)
  1431  	c.globalsScan.Store(globalsSize)
  1432  	c.heapLive.Store(trigger)
  1433  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1434  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1435  }
  1436  
  1437  func (c *GCController) AssistWorkPerByte() float64 {
  1438  	return c.assistWorkPerByte.Load()
  1439  }
  1440  
  1441  func (c *GCController) HeapGoal() uint64 {
  1442  	return c.heapGoal()
  1443  }
  1444  
  1445  func (c *GCController) HeapLive() uint64 {
  1446  	return c.heapLive.Load()
  1447  }
  1448  
  1449  func (c *GCController) HeapMarked() uint64 {
  1450  	return c.heapMarked
  1451  }
  1452  
  1453  func (c *GCController) Triggered() uint64 {
  1454  	return c.triggered
  1455  }
  1456  
  1457  type GCControllerReviseDelta struct {
  1458  	HeapLive        int64
  1459  	HeapScan        int64
  1460  	HeapScanWork    int64
  1461  	StackScanWork   int64
  1462  	GlobalsScanWork int64
  1463  }
  1464  
  1465  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1466  	c.heapLive.Add(d.HeapLive)
  1467  	c.heapScan.Add(d.HeapScan)
  1468  	c.heapScanWork.Add(d.HeapScanWork)
  1469  	c.stackScanWork.Add(d.StackScanWork)
  1470  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1471  	c.revise()
  1472  }
  1473  
  1474  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1475  	c.assistTime.Store(assistTime)
  1476  	c.endCycle(elapsed, gomaxprocs, false)
  1477  	c.resetLive(bytesMarked)
  1478  	c.commit(false)
  1479  }
  1480  
  1481  func (c *GCController) AddIdleMarkWorker() bool {
  1482  	return c.addIdleMarkWorker()
  1483  }
  1484  
  1485  func (c *GCController) NeedIdleMarkWorker() bool {
  1486  	return c.needIdleMarkWorker()
  1487  }
  1488  
  1489  func (c *GCController) RemoveIdleMarkWorker() {
  1490  	c.removeIdleMarkWorker()
  1491  }
  1492  
  1493  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1494  	c.setMaxIdleMarkWorkers(max)
  1495  }
  1496  
  1497  var alwaysFalse bool
  1498  var escapeSink any
  1499  
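        // Escape forces x to escape to the heap: the compiler cannot prove the
        // never-taken store to escapeSink dead, so x is treated as escaping.
        // Tests use it to keep values from being stack-allocated or moved.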
  1500  func Escape[T any](x T) T {
  1501  	if alwaysFalse {
  1502  		escapeSink = x
  1503  	}
  1504  	return x
  1505  }
  1506  
  1507  // Acquirem blocks preemption.
  1508  func Acquirem() {
  1509  	acquirem()
  1510  }
  1511  
  1512  func Releasem() {
  1513  	releasem(getg().m)
  1514  }
  1515  
  1516  // GoschedIfBusy is an explicit preemption check to call back
   1517  // into the scheduler. This is useful for tests that run code which
   1518  // spends most of its time non-preemptible: place it right after the
   1519  // code becomes preemptible again to ensure the scheduler gets a
   1520  // chance to preempt the goroutine.
  1521  func GoschedIfBusy() {
  1522  	goschedIfBusy()
  1523  }
  1524  
  1525  type PIController struct {
  1526  	piController
  1527  }
  1528  
  1529  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1530  	return &PIController{piController{
  1531  		kp:  kp,
  1532  		ti:  ti,
  1533  		tt:  tt,
  1534  		min: min,
  1535  		max: max,
  1536  	}}
  1537  }
  1538  
  1539  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1540  	return c.piController.next(input, setpoint, period)
  1541  }
  1542  
  1543  const (
  1544  	CapacityPerProc          = capacityPerProc
  1545  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1546  )
  1547  
  1548  type GCCPULimiter struct {
  1549  	limiter gcCPULimiterState
  1550  }
  1551  
  1552  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1553  	// Force the controller to escape. We're going to
  1554  	// do 64-bit atomics on it, and if it gets stack-allocated
  1555  	// on a 32-bit architecture, it may get allocated unaligned
  1556  	// space.
  1557  	l := Escape(new(GCCPULimiter))
  1558  	l.limiter.test = true
  1559  	l.limiter.resetCapacity(now, gomaxprocs)
  1560  	return l
  1561  }
  1562  
  1563  func (l *GCCPULimiter) Fill() uint64 {
  1564  	return l.limiter.bucket.fill
  1565  }
  1566  
  1567  func (l *GCCPULimiter) Capacity() uint64 {
  1568  	return l.limiter.bucket.capacity
  1569  }
  1570  
  1571  func (l *GCCPULimiter) Overflow() uint64 {
  1572  	return l.limiter.overflow
  1573  }
  1574  
  1575  func (l *GCCPULimiter) Limiting() bool {
  1576  	return l.limiter.limiting()
  1577  }
  1578  
  1579  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1580  	return l.limiter.needUpdate(now)
  1581  }
  1582  
  1583  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1584  	l.limiter.startGCTransition(enableGC, now)
  1585  }
  1586  
  1587  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1588  	l.limiter.finishGCTransition(now)
  1589  }
  1590  
  1591  func (l *GCCPULimiter) Update(now int64) {
  1592  	l.limiter.update(now)
  1593  }
  1594  
  1595  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1596  	l.limiter.addAssistTime(t)
  1597  }
  1598  
  1599  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1600  	l.limiter.resetCapacity(now, nprocs)
  1601  }
  1602  
  1603  const ScavengePercent = scavengePercent
  1604  
  1605  type Scavenger struct {
  1606  	Sleep      func(int64) int64
  1607  	Scavenge   func(uintptr) (uintptr, int64)
  1608  	ShouldStop func() bool
  1609  	GoMaxProcs func() int32
  1610  
  1611  	released  atomic.Uintptr
  1612  	scavenger scavengerState
  1613  	stop      chan<- struct{}
  1614  	done      <-chan struct{}
  1615  }
  1616  
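        // Start installs the test hooks, starts a goroutine that loosely
        // mimics bgscavenge using them, and waits up to one second for the
        // scavenger to park. It panics if any hook is nil or if the scavenger
        // never becomes ready.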
  1617  func (s *Scavenger) Start() {
  1618  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1619  		panic("must populate all stubs")
  1620  	}
  1621  
  1622  	// Install hooks.
  1623  	s.scavenger.sleepStub = s.Sleep
  1624  	s.scavenger.scavenge = s.Scavenge
  1625  	s.scavenger.shouldStop = s.ShouldStop
  1626  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1627  
  1628  	// Start up scavenger goroutine, and wait for it to be ready.
  1629  	stop := make(chan struct{})
  1630  	s.stop = stop
  1631  	done := make(chan struct{})
  1632  	s.done = done
  1633  	go func() {
  1634  		// This should match bgscavenge, loosely.
  1635  		s.scavenger.init()
  1636  		s.scavenger.park()
  1637  		for {
  1638  			select {
  1639  			case <-stop:
  1640  				close(done)
  1641  				return
  1642  			default:
  1643  			}
  1644  			released, workTime := s.scavenger.run()
  1645  			if released == 0 {
  1646  				s.scavenger.park()
  1647  				continue
  1648  			}
  1649  			s.released.Add(released)
  1650  			s.scavenger.sleep(workTime)
  1651  		}
  1652  	}()
  1653  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1654  		panic("timed out waiting for scavenger to get ready")
  1655  	}
  1656  }
  1657  
  1658  // BlockUntilParked blocks until the scavenger parks, or until
  1659  // timeout is exceeded. Returns true if the scavenger parked.
  1660  //
  1661  // Note that in testing, parked means something slightly different.
  1662  // In anger, the scavenger parks to sleep, too, but in testing,
  1663  // it only parks when it actually has no work to do.
  1664  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1665  	// Just spin, waiting for it to park.
  1666  	//
  1667  	// The actual parking process is racy with respect to
  1668  	// wakeups, which is fine, but for testing we need something
  1669  	// a bit more robust.
  1670  	start := nanotime()
  1671  	for nanotime()-start < timeout {
  1672  		lock(&s.scavenger.lock)
  1673  		parked := s.scavenger.parked
  1674  		unlock(&s.scavenger.lock)
  1675  		if parked {
  1676  			return true
  1677  		}
  1678  		Gosched()
  1679  	}
  1680  	return false
  1681  }
  1682  
  1683  // Released returns how many bytes the scavenger released.
  1684  func (s *Scavenger) Released() uintptr {
  1685  	return s.released.Load()
  1686  }
  1687  
  1688  // Wake wakes up a parked scavenger to keep running.
  1689  func (s *Scavenger) Wake() {
  1690  	s.scavenger.wake()
  1691  }
  1692  
  1693  // Stop cleans up the scavenger's resources. The scavenger
  1694  // must be parked for this to work.
  1695  func (s *Scavenger) Stop() {
  1696  	lock(&s.scavenger.lock)
  1697  	parked := s.scavenger.parked
  1698  	unlock(&s.scavenger.lock)
  1699  	if !parked {
  1700  		panic("tried to clean up scavenger that is not parked")
  1701  	}
  1702  	close(s.stop)
  1703  	s.Wake()
  1704  	<-s.done
  1705  }
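
// An illustrative sketch (not part of this file) of how a test in package
// runtime_test might drive the Scavenger harness above; the stub behaviors
// and byte counts are hypothetical:
//
//	s := new(runtime.Scavenger)
//	s.Sleep = func(d int64) int64 { return d } // pretend the full sleep elapsed
//	s.GoMaxProcs = func() int32 { return 1 }
//	s.ShouldStop = func() bool { return false }
//	work := 2
//	s.Scavenge = func(n uintptr) (uintptr, int64) {
//		if work == 0 {
//			return 0, 0 // out of work: the scavenger parks
//		}
//		work--
//		return n, 10 // pretend n bytes were released in 10ns of work
//	}
//	s.Start()
//	s.Wake()
//	if !s.BlockUntilParked(1e9 /* 1 second */) {
//		// timed out waiting for the scavenger to run out of work
//	}
//	released := s.Released()
//	s.Stop()
//	_ = released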
  1706  
  1707  type ScavengeIndex struct {
  1708  	i scavengeIndex
  1709  }
  1710  
  1711  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1712  	s := new(ScavengeIndex)
  1713  	// This is a bit lazy but we easily guarantee we'll be able
  1714  	// to reference all the relevant chunks. The worst-case
  1715  	// memory usage here is 512 MiB, but tests generally use
  1716  	// small offsets from BaseChunkIdx, which results in ~100s
  1717  	// of KiB in memory use.
  1718  	//
  1719  	// This may still be worth making better, at least by sharing
  1720  	// this fairly large array across calls with a sync.Pool or
  1721  	// something. Currently, when the tests are run serially,
  1722  	// it takes around 0.5s. Not all that much, but if we have
  1723  	// a lot of tests like this it could add up.
  1724  	s.i.chunks = make([]atomicScavChunkData, max)
  1725  	s.i.min.Store(uintptr(min))
  1726  	s.i.max.Store(uintptr(max))
  1727  	s.i.minHeapIdx.Store(uintptr(min))
  1728  	s.i.test = true
  1729  	return s
  1730  }
  1731  
  1732  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1733  	ci, off := s.i.find(force)
  1734  	return ChunkIdx(ci), off
  1735  }
  1736  
  1737  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1738  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1739  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1740  
  1741  	if sc == ec {
  1742  		// The range doesn't cross any chunk boundaries.
  1743  		s.i.alloc(sc, ei+1-si)
  1744  	} else {
  1745  		// The range crosses at least one chunk boundary.
  1746  		s.i.alloc(sc, pallocChunkPages-si)
  1747  		for c := sc + 1; c < ec; c++ {
  1748  			s.i.alloc(c, pallocChunkPages)
  1749  		}
  1750  		s.i.alloc(ec, ei+1)
  1751  	}
  1752  }
  1753  
  1754  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1755  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1756  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1757  
  1758  	if sc == ec {
  1759  		// The range doesn't cross any chunk boundaries.
  1760  		s.i.free(sc, si, ei+1-si)
  1761  	} else {
  1762  		// The range crosses at least one chunk boundary.
  1763  		s.i.free(sc, si, pallocChunkPages-si)
  1764  		for c := sc + 1; c < ec; c++ {
  1765  			s.i.free(c, 0, pallocChunkPages)
  1766  		}
  1767  		s.i.free(ec, 0, ei+1)
  1768  	}
  1769  }
  1770  
  1771  func (s *ScavengeIndex) ResetSearchAddrs() {
  1772  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1773  		addr, marked := a.Load()
  1774  		if marked {
  1775  			a.StoreUnmark(addr, addr)
  1776  		}
  1777  		a.Clear()
  1778  	}
  1779  	s.i.freeHWM = minOffAddr
  1780  }
  1781  
  1782  func (s *ScavengeIndex) NextGen() {
  1783  	s.i.nextGen()
  1784  }
  1785  
  1786  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1787  	s.i.setEmpty(chunkIdx(ci))
  1788  }
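
// An illustrative sketch (not part of this file) of exercising the
// ScavengeIndex wrappers above from a test; it assumes the BaseChunkIdx and
// PageBase helpers defined elsewhere in this file:
//
//	s := runtime.NewScavengeIndex(runtime.BaseChunkIdx, runtime.BaseChunkIdx+64)
//	base := runtime.PageBase(runtime.BaseChunkIdx, 0)
//	limit := runtime.PageBase(runtime.BaseChunkIdx, 128) // a 128-page range
//	s.AllocRange(base, limit) // mark the pages allocated...
//	s.FreeRange(base, limit)  // ...then freed, so they become scavengable
//	s.NextGen()
//	ci, off := s.Find(false) // search for work as the background scavenger would
//	_, _ = ci, off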
  1789  
  1790  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1791  	sc0 := scavChunkData{
  1792  		gen:            gen,
  1793  		inUse:          inUse,
  1794  		lastInUse:      lastInUse,
  1795  		scavChunkFlags: scavChunkFlags(flags),
  1796  	}
  1797  	scp := sc0.pack()
  1798  	sc1 := unpackScavChunkData(scp)
  1799  	return sc0 == sc1
  1800  }
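
// A minimal illustrative round-trip check (not part of this file) as a test
// might write it; the field values are arbitrary:
//
//	if !runtime.CheckPackScavChunkData(1<<10, 12, 34, 0b11) {
//		// packing scavChunkData dropped information
//	}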
  1801  
  1802  const GTrackingPeriod = gTrackingPeriod
  1803  
  1804  var ZeroBase = unsafe.Pointer(&zerobase)
  1805  
  1806  const UserArenaChunkBytes = userArenaChunkBytes
  1807  
  1808  type UserArena struct {
  1809  	arena *userArena
  1810  }
  1811  
  1812  func NewUserArena() *UserArena {
  1813  	return &UserArena{newUserArena()}
  1814  }
  1815  
  1816  func (a *UserArena) New(out *any) {
  1817  	i := efaceOf(out)
  1818  	typ := i._type
  1819  	if typ.Kind() != abi.Pointer {
  1820  		panic("new result of non-ptr type")
  1821  	}
  1822  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1823  	i.data = a.arena.new(typ)
  1824  }
  1825  
  1826  func (a *UserArena) Slice(sl any, cap int) {
  1827  	a.arena.slice(sl, cap)
  1828  }
  1829  
  1830  func (a *UserArena) Free() {
  1831  	a.arena.free()
  1832  }
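
// An illustrative sketch (not part of this file) of the UserArena test API
// above, as a runtime_test might use it; the element type T is hypothetical:
//
//	type T struct{ X [1024]byte }
//	a := runtime.NewUserArena()
//	var x any = (*T)(nil)
//	a.New(&x) // x now holds a *T allocated from the arena
//	v := x.(*T)
//	var s []T
//	a.Slice(&s, 10) // s is a length-10 []T backed by arena memory
//	_ = v
//	a.Free() // release the arena; v and s must not be used afterwards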
  1833  
  1834  func GlobalWaitingArenaChunks() int {
  1835  	n := 0
  1836  	systemstack(func() {
  1837  		lock(&mheap_.lock)
  1838  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1839  			n++
  1840  		}
  1841  		unlock(&mheap_.lock)
  1842  	})
  1843  	return n
  1844  }
  1845  
  1846  func UserArenaClone[T any](s T) T {
  1847  	return arena_heapify(s).(T)
  1848  }
  1849  
  1850  var AlignUp = alignUp
  1851  
  1852  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1853  	return blockUntilEmptyFinalizerQueue(timeout)
  1854  }
  1855  
  1856  func BlockUntilEmptyCleanupQueue(timeout int64) bool {
  1857  	return gcCleanups.blockUntilEmpty(timeout)
  1858  }
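
// An illustrative sketch (not part of this file) of the typical test pattern
// for these helpers: queue a cleanup, drop the object, force a GC, and wait
// for the cleanup queue to drain; the timeout is arbitrary:
//
//	ran := make(chan struct{})
//	obj := new(int)
//	runtime.AddCleanup(obj, func(struct{}) { close(ran) }, struct{}{})
//	obj = nil
//	runtime.GC()
//	if !runtime.BlockUntilEmptyCleanupQueue(int64(5 * time.Second)) {
//		// timed out waiting for cleanups to run
//	}
//	<-ran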
  1859  
  1860  func FrameStartLine(f *Frame) int {
  1861  	return f.startLine
  1862  }
  1863  
  1864  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1865  // This memory will never be freed; use sparingly.
  1866  func PersistentAlloc(n, align uintptr) unsafe.Pointer {
  1867  	return persistentalloc(n, align, &memstats.other_sys)
  1868  }
  1869  
  1870  const TagAlign = tagAlign
  1871  
  1872  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1873  // pcBuf with the return addresses of the physical frames on the stack.
  1874  func FPCallers(pcBuf []uintptr) int {
  1875  	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
  1876  }
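
// An illustrative sketch (not part of this file): FPCallers is only
// meaningful when frame pointers are in use, so a test would gate on
// FramePointerEnabled (declared just below) first:
//
//	if runtime.FramePointerEnabled {
//		pcs := make([]uintptr, 32)
//		n := runtime.FPCallers(pcs)
//		pcs = pcs[:n] // return addresses of the physical frames
//	}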
  1877  
  1878  const FramePointerEnabled = framepointer_enabled
  1879  
  1880  var (
  1881  	IsPinned      = isPinned
  1882  	GetPinCounter = pinnerGetPinCounter
  1883  )
  1884  
  1885  func SetPinnerLeakPanic(f func()) {
  1886  	pinnerLeakPanic = f
  1887  }
  1888  func GetPinnerLeakPanic() func() {
  1889  	return pinnerLeakPanic
  1890  }
  1891  
  1892  var testUintptr uintptr
  1893  
  1894  func MyGenericFunc[T any]() {
  1895  	systemstack(func() {
  1896  		testUintptr = 4
  1897  	})
  1898  }
  1899  
  1900  func UnsafePoint(pc uintptr) bool {
  1901  	fi := findfunc(pc)
  1902  	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
  1903  	switch v {
  1904  	case abi.UnsafePointUnsafe:
  1905  		return true
  1906  	case abi.UnsafePointSafe:
  1907  		return false
  1908  	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
  1909  		// These are all interruptible, they just encode a nonstandard
  1910  		// way of recovering when interrupted.
  1911  		return false
  1912  	default:
  1913  		var buf [20]byte
  1914  		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
  1915  	}
  1916  }
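
// An illustrative sketch (not part of this file) of querying UnsafePoint with
// PCs taken from the current goroutine's own stack:
//
//	pcs := make([]uintptr, 8)
//	n := runtime.Callers(0, pcs)
//	for _, pc := range pcs[:n] {
//		_ = runtime.UnsafePoint(pc) // is this PC interruptible for async preemption?
//	}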
  1917  
  1918  type TraceMap struct {
  1919  	traceMap
  1920  }
  1921  
  1922  func (m *TraceMap) PutString(s string) (uint64, bool) {
  1923  	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
  1924  }
  1925  
  1926  func (m *TraceMap) Reset() {
  1927  	m.traceMap.reset()
  1928  }
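
// An illustrative sketch (not part of this file): identical strings map to
// the same ID until the map is reset. The second result of PutString is read
// here as "newly added" (an assumption about traceMap.put's semantics):
//
//	var m runtime.TraceMap
//	id1, fresh1 := m.PutString("hello")
//	id2, fresh2 := m.PutString("hello")
//	// expect id1 == id2, fresh1 == true, fresh2 == false
//	_, _, _, _ = id1, id2, fresh1, fresh2
//	m.Reset() // release the map's memory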
  1929  
  1930  func SetSpinInGCMarkDone(spin bool) {
  1931  	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
  1932  }
  1933  
  1934  func GCMarkDoneRestarted() bool {
  1935  	// Only read this outside of the GC. If we're running during a GC, just report false.
  1936  	mp := acquirem()
  1937  	if gcphase != _GCoff {
  1938  		releasem(mp)
  1939  		return false
  1940  	}
  1941  	restarted := gcDebugMarkDone.restartedDueTo27993
  1942  	releasem(mp)
  1943  	return restarted
  1944  }
  1945  
  1946  func GCMarkDoneResetRestartFlag() {
  1947  	mp := acquirem()
  1948  	for gcphase != _GCoff {
  1949  		releasem(mp)
  1950  		Gosched()
  1951  		mp = acquirem()
  1952  	}
  1953  	gcDebugMarkDone.restartedDueTo27993 = false
  1954  	releasem(mp)
  1955  }
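
// An illustrative sketch (not part of this file) of observing the
// mark-termination restart path (issue #27993) around an explicit GC:
//
//	runtime.GCMarkDoneResetRestartFlag()
//	runtime.GC()
//	if runtime.GCMarkDoneRestarted() {
//		// gcMarkDone had to restart at least once during that cycle
//	}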
  1956  
  1957  type BitCursor struct {
  1958  	b bitCursor
  1959  }
  1960  
  1961  func NewBitCursor(buf *byte) BitCursor {
  1962  	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
  1963  }
  1964  
  1965  func (b BitCursor) Write(data *byte, cnt uintptr) {
  1966  	b.b.write(data, cnt)
  1967  }
  1968  func (b BitCursor) Offset(cnt uintptr) BitCursor {
  1969  	return BitCursor{b: b.b.offset(cnt)}
  1970  }
  1971  
  1972  const (
  1973  	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
  1974  	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
  1975  	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
  1976  )
  1977  
  1978  type TraceStackTable traceStackTable
  1979  
  1980  func (t *TraceStackTable) Reset() {
  1981  	t.tab.reset()
  1982  }
  1983  
  1984  func TraceStack(gp *G, tab *TraceStackTable) {
  1985  	traceStack(0, gp, (*traceStackTable)(tab))
  1986  }
  1987  
  1988  var X86HasAVX = &x86HasAVX
  1989  
  1990  var DebugDecorateMappings = &debug.decoratemappings
  1991  
  1992  func SetVMANameSupported() bool { return setVMANameSupported() }
  1993  
  1994  type ListHead struct {
  1995  	l listHead
  1996  }
  1997  
  1998  func (head *ListHead) Init(off uintptr) {
  1999  	head.l.init(off)
  2000  }
  2001  
  2002  type ListNode struct {
  2003  	l listNode
  2004  }
  2005  
  2006  func (head *ListHead) Push(p unsafe.Pointer) {
  2007  	head.l.push(p)
  2008  }
  2009  
  2010  func (head *ListHead) Pop() unsafe.Pointer {
  2011  	return head.l.pop()
  2012  }
  2013  
  2014  func (head *ListHead) Remove(p unsafe.Pointer) {
  2015  	head.l.remove(p)
  2016  }
  2017  
  2018  type ListHeadManual struct {
  2019  	l listHeadManual
  2020  }
  2021  
  2022  func (head *ListHeadManual) Init(off uintptr) {
  2023  	head.l.init(off)
  2024  }
  2025  
  2026  type ListNodeManual struct {
  2027  	l listNodeManual
  2028  }
  2029  
  2030  func (head *ListHeadManual) Push(p unsafe.Pointer) {
  2031  	head.l.push(p)
  2032  }
  2033  
  2034  func (head *ListHeadManual) Pop() unsafe.Pointer {
  2035  	return head.l.pop()
  2036  }
  2037  
  2038  func (head *ListHeadManual) Remove(p unsafe.Pointer) {
  2039  	head.l.remove(p)
  2040  }
  2041  
  2042  func Hexdumper(base uintptr, wordBytes int, mark func(addr uintptr, start func()), data ...[]byte) string {
  2043  	buf := make([]byte, 0, 2048)
  2044  	getg().writebuf = buf
  2045  	h := hexdumper{addr: base, addrBytes: 4, wordBytes: uint8(wordBytes)}
  2046  	if mark != nil {
  2047  		h.mark = func(addr uintptr, m hexdumpMarker) {
  2048  			mark(addr, m.start)
  2049  		}
  2050  	}
  2051  	for _, d := range data {
  2052  		h.write(d)
  2053  	}
  2054  	h.close()
  2055  	n := len(getg().writebuf)
  2056  	getg().writebuf = nil
  2057  	if n == cap(buf) {
  2058  		panic("Hexdumper buf too small")
  2059  	}
  2060  	return string(buf[:n])
  2061  }
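
// An illustrative sketch (not part of this file) of calling the Hexdumper
// wrapper from a test; the base address and bytes are arbitrary, and a nil
// mark function disables marking:
//
//	data := []byte{0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04}
//	dump := runtime.Hexdumper(0x1000, 4 /* bytes per word */, nil, data)
//	_ = dump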
  2062  
  2063  func HexdumpWords(p, bytes uintptr) string {
  2064  	buf := make([]byte, 0, 2048)
  2065  	getg().writebuf = buf
  2066  	hexdumpWords(p, bytes, nil)
  2067  	n := len(getg().writebuf)
  2068  	getg().writebuf = nil
  2069  	if n == cap(buf) {
  2070  		panic("HexdumpWords buf too small")
  2071  	}
  2072  	return string(buf[:n])
  2073  }
  2074  
  2075  // DumpPrintQuoted provides access to print(quoted()) for the tests in
  2076  // runtime/print_quoted_test.go, allowing us to test that implementation.
  2077  func DumpPrintQuoted(s string) string {
  2078  	gp := getg()
  2079  	gp.writebuf = make([]byte, 0, 1<<20)
  2080  	print(quoted(s))
  2081  	buf := gp.writebuf
  2082  	gp.writebuf = nil
  2083  
  2084  	return string(buf)
  2085  }
  2086  
