Source file src/runtime/export_test.go

     1  // Copyright 2010 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Export guts for testing.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/goos"
    13  	"internal/runtime/atomic"
    14  	"internal/runtime/gc"
    15  	"internal/runtime/sys"
    16  	"unsafe"
    17  )
    18  
    19  var Fadd64 = fadd64
    20  var Fsub64 = fsub64
    21  var Fmul64 = fmul64
    22  var Fdiv64 = fdiv64
    23  var F64to32 = f64to32
    24  var F32to64 = f32to64
    25  var Fcmp64 = fcmp64
    26  var Fintto64 = fintto64
    27  var F64toint = f64toint
    28  
    29  var Entersyscall = entersyscall
    30  var Exitsyscall = exitsyscall
    31  var LockedOSThread = lockedOSThread
    32  var Xadduintptr = atomic.Xadduintptr
    33  
    34  var ReadRandomFailed = &readRandomFailed
    35  
    36  var Fastlog2 = fastlog2
    37  
    38  var ParseByteCount = parseByteCount
    39  
    40  var Nanotime = nanotime
    41  var Cputicks = cputicks
    42  var CyclesPerSecond = pprof_cyclesPerSecond
    43  var NetpollBreak = netpollBreak
    44  var Usleep = usleep
    45  
    46  var PhysPageSize = physPageSize
    47  var PhysHugePageSize = physHugePageSize
    48  
    49  var NetpollGenericInit = netpollGenericInit
    50  
    51  var Memmove = memmove
    52  var MemclrNoHeapPointers = memclrNoHeapPointers
    53  
    54  var CgoCheckPointer = cgoCheckPointer
    55  
    56  const CrashStackImplemented = crashStackImplemented
    57  
    58  const TracebackInnerFrames = tracebackInnerFrames
    59  const TracebackOuterFrames = tracebackOuterFrames
    60  
    61  var LockPartialOrder = lockPartialOrder
    62  
    63  type TimeTimer = timeTimer
    64  
    65  type LockRank lockRank
    66  
    67  func (l LockRank) String() string {
    68  	return lockRank(l).String()
    69  }
    70  
    71  const PreemptMSupported = preemptMSupported
    72  
    73  type LFNode struct {
    74  	Next    uint64
    75  	Pushcnt uintptr
    76  }
    77  
    78  func LFStackPush(head *uint64, node *LFNode) {
    79  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
    80  }
    81  
    82  func LFStackPop(head *uint64) *LFNode {
    83  	return (*LFNode)((*lfstack)(head).pop())
    84  }
    85  func LFNodeValidate(node *LFNode) {
    86  	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
    87  }
    88  
    89  func Netpoll(delta int64) {
    90  	systemstack(func() {
    91  		netpoll(delta)
    92  	})
    93  }
    94  
    95  func PointerMask(x any) (ret []byte) {
    96  	systemstack(func() {
    97  		ret = pointerMask(x)
    98  	})
    99  	return
   100  }
   101  
   102  func RunSchedLocalQueueTest() {
   103  	pp := new(p)
   104  	gs := make([]g, len(pp.runq))
   105  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   106  	for i := 0; i < len(pp.runq); i++ {
   107  		if g, _ := runqget(pp); g != nil {
   108  			throw("runq is not empty initially")
   109  		}
   110  		for j := 0; j < i; j++ {
   111  			runqput(pp, &gs[i], false)
   112  		}
   113  		for j := 0; j < i; j++ {
   114  			if g, _ := runqget(pp); g != &gs[i] {
   115  				print("bad element at iter ", i, "/", j, "\n")
   116  				throw("bad element")
   117  			}
   118  		}
   119  		if g, _ := runqget(pp); g != nil {
   120  			throw("runq is not empty afterwards")
   121  		}
   122  	}
   123  }
   124  
   125  func RunSchedLocalQueueStealTest() {
   126  	p1 := new(p)
   127  	p2 := new(p)
   128  	gs := make([]g, len(p1.runq))
   129  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   130  	for i := 0; i < len(p1.runq); i++ {
   131  		for j := 0; j < i; j++ {
   132  			gs[j].sig = 0
   133  			runqput(p1, &gs[j], false)
   134  		}
   135  		gp := runqsteal(p2, p1, true)
   136  		s := 0
   137  		if gp != nil {
   138  			s++
   139  			gp.sig++
   140  		}
   141  		for {
   142  			gp, _ = runqget(p2)
   143  			if gp == nil {
   144  				break
   145  			}
   146  			s++
   147  			gp.sig++
   148  		}
   149  		for {
   150  			gp, _ = runqget(p1)
   151  			if gp == nil {
   152  				break
   153  			}
   154  			gp.sig++
   155  		}
   156  		for j := 0; j < i; j++ {
   157  			if gs[j].sig != 1 {
   158  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
   159  				throw("bad element")
   160  			}
   161  		}
   162  		if s != i/2 && s != i/2+1 {
   163  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
   164  			throw("bad steal")
   165  		}
   166  	}
   167  }
   168  
   169  func RunSchedLocalQueueEmptyTest(iters int) {
   170  	// Test that runq is not spuriously reported as empty.
    171  	// Runq emptiness affects scheduling decisions, and spurious emptiness
    172  	// can lead to underutilization (runnable Gs and idle Ps coexisting
    173  	// for an arbitrarily long time).
   174  	done := make(chan bool, 1)
   175  	p := new(p)
   176  	gs := make([]g, 2)
   177  	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
   178  	ready := new(uint32)
   179  	for i := 0; i < iters; i++ {
   180  		*ready = 0
   181  		next0 := (i & 1) == 0
   182  		next1 := (i & 2) == 0
   183  		runqput(p, &gs[0], next0)
   184  		go func() {
   185  			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   186  			}
   187  			if runqempty(p) {
   188  				println("next:", next0, next1)
   189  				throw("queue is empty")
   190  			}
   191  			done <- true
   192  		}()
   193  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
   194  		}
   195  		runqput(p, &gs[1], next1)
   196  		runqget(p)
   197  		<-done
   198  		runqget(p)
   199  	}
   200  }
   201  
   202  var (
   203  	StringHash = stringHash
   204  	BytesHash  = bytesHash
   205  	Int32Hash  = int32Hash
   206  	Int64Hash  = int64Hash
   207  	MemHash    = memhash
   208  	MemHash32  = memhash32
   209  	MemHash64  = memhash64
   210  	EfaceHash  = efaceHash
   211  	IfaceHash  = ifaceHash
   212  )
   213  
   214  var UseAeshash = &useAeshash
   215  
   216  func MemclrBytes(b []byte) {
   217  	s := (*slice)(unsafe.Pointer(&b))
   218  	memclrNoHeapPointers(s.array, uintptr(s.len))
   219  }
   220  
   221  const HashLoad = hashLoad
   222  
   223  // entry point for testing
   224  func GostringW(w []uint16) (s string) {
   225  	systemstack(func() {
   226  		s = gostringw(&w[0])
   227  	})
   228  	return
   229  }
   230  
   231  var Open = open
   232  var Close = closefd
   233  var Read = read
   234  var Write = write
   235  
   236  func Envs() []string     { return envs }
   237  func SetEnvs(e []string) { envs = e }
   238  
   239  const PtrSize = goarch.PtrSize
   240  
   241  var ForceGCPeriod = &forcegcperiod
   242  
   243  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
   244  // the "environment" traceback level, so later calls to
   245  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
   246  func SetTracebackEnv(level string) {
   247  	setTraceback(level)
   248  	traceback_env = traceback_cache
   249  }
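
// setTracebackEnvExample is an illustrative sketch (not part of the upstream
// file; the function name is hypothetical): a test harness that wants crash
// dumps regardless of later debug.SetTraceback calls can pin the level once,
// up front.
func setTracebackEnvExample() {
	SetTracebackEnv("system") // acts like GOTRACEBACK=system and can't be lowered later
}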
   250  
   251  var ReadUnaligned32 = readUnaligned32
   252  var ReadUnaligned64 = readUnaligned64
   253  
   254  func CountPagesInUse() (pagesInUse, counted uintptr) {
   255  	stw := stopTheWorld(stwForTestCountPagesInUse)
   256  
   257  	pagesInUse = mheap_.pagesInUse.Load()
   258  
   259  	for _, s := range mheap_.allspans {
   260  		if s.state.get() == mSpanInUse {
   261  			counted += s.npages
   262  		}
   263  	}
   264  
   265  	startTheWorld(stw)
   266  
   267  	return
   268  }
   269  
   270  func Fastrand() uint32          { return uint32(rand()) }
   271  func Fastrand64() uint64        { return rand() }
   272  func Fastrandn(n uint32) uint32 { return randn(n) }
   273  
   274  type ProfBuf profBuf
   275  
   276  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
   277  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
   278  }
   279  
   280  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
   281  	(*profBuf)(p).write(tag, now, hdr, stk)
   282  }
   283  
   284  const (
   285  	ProfBufBlocking    = profBufBlocking
   286  	ProfBufNonBlocking = profBufNonBlocking
   287  )
   288  
   289  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
   290  	return (*profBuf)(p).read(mode)
   291  }
   292  
   293  func (p *ProfBuf) Close() {
   294  	(*profBuf)(p).close()
   295  }
   296  
   297  type CPUStats = cpuStats
   298  
   299  func ReadCPUStats() CPUStats {
   300  	return work.cpuStats
   301  }
   302  
   303  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
   304  	stw := stopTheWorld(stwForTestReadMetricsSlow)
   305  
   306  	// Initialize the metrics beforehand because this could
   307  	// allocate and skew the stats.
   308  	metricsLock()
   309  	initMetrics()
   310  
   311  	systemstack(func() {
   312  		// Donate the racectx to g0. readMetricsLocked calls into the race detector
   313  		// via map access.
   314  		getg().racectx = getg().m.curg.racectx
   315  
   316  		// Read the metrics once before in case it allocates and skews the metrics.
   317  		// readMetricsLocked is designed to only allocate the first time it is called
   318  		// with a given slice of samples. In effect, this extra read tests that this
   319  		// remains true, since otherwise the second readMetricsLocked below could
   320  		// allocate before it returns.
   321  		readMetricsLocked(samplesp, len, cap)
   322  
   323  		// Read memstats first. It's going to flush
   324  		// the mcaches which readMetrics does not do, so
   325  		// going the other way around may result in
   326  		// inconsistent statistics.
   327  		readmemstats_m(memStats)
   328  
   329  		// Read metrics again. We need to be sure we're on the
   330  		// system stack with readmemstats_m so that we don't call into
   331  		// the stack allocator and adjust metrics between there and here.
   332  		readMetricsLocked(samplesp, len, cap)
   333  
   334  		// Undo the donation.
   335  		getg().racectx = 0
   336  	})
   337  	metricsUnlock()
   338  
   339  	startTheWorld(stw)
   340  }
   341  
   342  var DoubleCheckReadMemStats = &doubleCheckReadMemStats
   343  
   344  // ReadMemStatsSlow returns both the runtime-computed MemStats and
   345  // MemStats accumulated by scanning the heap.
   346  func ReadMemStatsSlow() (base, slow MemStats) {
   347  	stw := stopTheWorld(stwForTestReadMemStatsSlow)
   348  
   349  	// Run on the system stack to avoid stack growth allocation.
   350  	systemstack(func() {
   351  		// Make sure stats don't change.
   352  		getg().m.mallocing++
   353  
   354  		readmemstats_m(&base)
   355  
   356  		// Initialize slow from base and zero the fields we're
   357  		// recomputing.
   358  		slow = base
   359  		slow.Alloc = 0
   360  		slow.TotalAlloc = 0
   361  		slow.Mallocs = 0
   362  		slow.Frees = 0
   363  		slow.HeapReleased = 0
   364  		var bySize [gc.NumSizeClasses]struct {
   365  			Mallocs, Frees uint64
   366  		}
   367  
   368  		// Add up current allocations in spans.
   369  		for _, s := range mheap_.allspans {
   370  			if s.state.get() != mSpanInUse {
   371  				continue
   372  			}
   373  			if s.isUnusedUserArenaChunk() {
   374  				continue
   375  			}
   376  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
   377  				slow.Mallocs++
   378  				slow.Alloc += uint64(s.elemsize)
   379  			} else {
   380  				slow.Mallocs += uint64(s.allocCount)
   381  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
   382  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
   383  			}
   384  		}
   385  
   386  		// Add in frees by just reading the stats for those directly.
   387  		var m heapStatsDelta
   388  		memstats.heapStats.unsafeRead(&m)
   389  
   390  		// Collect per-sizeclass free stats.
   391  		var smallFree uint64
   392  		for i := 0; i < gc.NumSizeClasses; i++ {
   393  			slow.Frees += m.smallFreeCount[i]
   394  			bySize[i].Frees += m.smallFreeCount[i]
   395  			bySize[i].Mallocs += m.smallFreeCount[i]
   396  			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
   397  		}
   398  		slow.Frees += m.tinyAllocCount + m.largeFreeCount
   399  		slow.Mallocs += slow.Frees
   400  
   401  		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
   402  
   403  		for i := range slow.BySize {
   404  			slow.BySize[i].Mallocs = bySize[i].Mallocs
   405  			slow.BySize[i].Frees = bySize[i].Frees
   406  		}
   407  
   408  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
   409  			chunk := mheap_.pages.tryChunkOf(i)
   410  			if chunk == nil {
   411  				continue
   412  			}
   413  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
   414  			slow.HeapReleased += uint64(pg) * pageSize
   415  		}
   416  		for _, p := range allp {
   417  			// Only count scav bits for pages in the cache
   418  			pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
   419  			slow.HeapReleased += uint64(pg) * pageSize
   420  		}
   421  
   422  		getg().m.mallocing--
   423  	})
   424  
   425  	startTheWorld(stw)
   426  	return
   427  }
   428  
   429  // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
   430  // and verifies that unwinding the new stack doesn't crash, even if the old
   431  // stack has been freed or reused (simulated via poisoning).
   432  func ShrinkStackAndVerifyFramePointers() {
   433  	before := stackPoisonCopy
   434  	defer func() { stackPoisonCopy = before }()
   435  	stackPoisonCopy = 1
   436  
   437  	gp := getg()
   438  	systemstack(func() {
   439  		shrinkstack(gp)
   440  	})
   441  	// If our new stack contains frame pointers into the old stack, this will
   442  	// crash because the old stack has been poisoned.
   443  	FPCallers(make([]uintptr, 1024))
   444  }
   445  
   446  // BlockOnSystemStack switches to the system stack, prints "x\n" to
   447  // stderr, and blocks in a stack containing
   448  // "runtime.blockOnSystemStackInternal".
   449  func BlockOnSystemStack() {
   450  	systemstack(blockOnSystemStackInternal)
   451  }
   452  
   453  func blockOnSystemStackInternal() {
   454  	print("x\n")
   455  	lock(&deadlock)
   456  	lock(&deadlock)
   457  }
   458  
   459  type RWMutex struct {
   460  	rw rwmutex
   461  }
   462  
   463  func (rw *RWMutex) Init() {
   464  	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
   465  }
   466  
   467  func (rw *RWMutex) RLock() {
   468  	rw.rw.rlock()
   469  }
   470  
   471  func (rw *RWMutex) RUnlock() {
   472  	rw.rw.runlock()
   473  }
   474  
   475  func (rw *RWMutex) Lock() {
   476  	rw.rw.lock()
   477  }
   478  
   479  func (rw *RWMutex) Unlock() {
   480  	rw.rw.unlock()
   481  }
   482  
   483  func LockOSCounts() (external, internal uint32) {
   484  	gp := getg()
   485  	if gp.m.lockedExt+gp.m.lockedInt == 0 {
   486  		if gp.lockedm != 0 {
   487  			panic("lockedm on non-locked goroutine")
   488  		}
   489  	} else {
   490  		if gp.lockedm == 0 {
   491  			panic("nil lockedm on locked goroutine")
   492  		}
   493  	}
   494  	return gp.m.lockedExt, gp.m.lockedInt
   495  }
   496  
   497  //go:noinline
   498  func TracebackSystemstack(stk []uintptr, i int) int {
   499  	if i == 0 {
   500  		pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
   501  		var u unwinder
   502  		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
   503  		return tracebackPCs(&u, 0, stk)
   504  	}
   505  	n := 0
   506  	systemstack(func() {
   507  		n = TracebackSystemstack(stk, i-1)
   508  	})
   509  	return n
   510  }
   511  
   512  func KeepNArenaHints(n int) {
   513  	hint := mheap_.arenaHints
   514  	for i := 1; i < n; i++ {
   515  		hint = hint.next
   516  		if hint == nil {
   517  			return
   518  		}
   519  	}
   520  	hint.next = nil
   521  }
   522  
   523  // MapNextArenaHint reserves a page at the next arena growth hint,
   524  // preventing the arena from growing there, and returns the range of
   525  // addresses that are no longer viable.
   526  //
   527  // This may fail to reserve memory. If it fails, it still returns the
   528  // address range it attempted to reserve.
   529  func MapNextArenaHint() (start, end uintptr, ok bool) {
   530  	hint := mheap_.arenaHints
   531  	addr := hint.addr
   532  	if hint.down {
   533  		start, end = addr-heapArenaBytes, addr
   534  		addr -= physPageSize
   535  	} else {
   536  		start, end = addr, addr+heapArenaBytes
   537  	}
   538  	got := sysReserve(unsafe.Pointer(addr), physPageSize, "")
   539  	ok = (addr == uintptr(got))
   540  	if !ok {
   541  		// We were unable to get the requested reservation.
   542  		// Release what we did get and fail.
   543  		sysFreeOS(got, physPageSize)
   544  	}
   545  	return
   546  }
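
// mapNextArenaHintExample is an illustrative sketch (not part of the upstream
// file; the function name is hypothetical): callers must handle a failed
// reservation, in which case the returned range is only what was attempted.
func mapNextArenaHintExample() (uintptr, uintptr) {
	start, end, ok := MapNextArenaHint()
	if !ok {
		return 0, 0 // reservation failed; growth at [start, end) wasn't actually blocked
	}
	return start, end // future arena growth must now avoid [start, end)
}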
   547  
   548  func GetNextArenaHint() uintptr {
   549  	return mheap_.arenaHints.addr
   550  }
   551  
   552  type G = g
   553  
   554  type Sudog = sudog
   555  
   556  type XRegPerG = xRegPerG
   557  
   558  func Getg() *G {
   559  	return getg()
   560  }
   561  
   562  func Goid() uint64 {
   563  	return getg().goid
   564  }
   565  
   566  func GIsWaitingOnMutex(gp *G) bool {
   567  	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
   568  }
   569  
   570  var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
   571  
   572  //go:noinline
   573  func PanicForTesting(b []byte, i int) byte {
   574  	return unexportedPanicForTesting(b, i)
   575  }
   576  
   577  //go:noinline
   578  func unexportedPanicForTesting(b []byte, i int) byte {
   579  	return b[i]
   580  }
   581  
   582  func G0StackOverflow() {
   583  	systemstack(func() {
   584  		g0 := getg()
   585  		sp := sys.GetCallerSP()
    586  		// The stack bounds for the g0 stack are not always precise.
    587  		// Use an artificially small stack to trigger a stack overflow
    588  		// without actually running out of the system stack (which may segfault).
   589  		g0.stack.lo = sp - 4096 - stackSystem
   590  		g0.stackguard0 = g0.stack.lo + stackGuard
   591  		g0.stackguard1 = g0.stackguard0
   592  
   593  		stackOverflow(nil)
   594  	})
   595  }
   596  
   597  func stackOverflow(x *byte) {
   598  	var buf [256]byte
   599  	stackOverflow(&buf[0])
   600  }
   601  
   602  func RunGetgThreadSwitchTest() {
   603  	// Test that getg works correctly with thread switch.
   604  	// With gccgo, if we generate getg inlined, the backend
   605  	// may cache the address of the TLS variable, which
   606  	// will become invalid after a thread switch. This test
   607  	// checks that the bad caching doesn't happen.
   608  
   609  	ch := make(chan int)
   610  	go func(ch chan int) {
   611  		ch <- 5
   612  		LockOSThread()
   613  	}(ch)
   614  
   615  	g1 := getg()
   616  
   617  	// Block on a receive. This is likely to get us a thread
   618  	// switch. If we yield to the sender goroutine, it will
   619  	// lock the thread, forcing us to resume on a different
   620  	// thread.
   621  	<-ch
   622  
   623  	g2 := getg()
   624  	if g1 != g2 {
   625  		panic("g1 != g2")
   626  	}
   627  
   628  	// Also test getg after some control flow, as the
   629  	// backend is sensitive to control flow.
   630  	g3 := getg()
   631  	if g1 != g3 {
   632  		panic("g1 != g3")
   633  	}
   634  }
   635  
   636  const (
   637  	PageSize         = pageSize
   638  	PallocChunkPages = pallocChunkPages
   639  	PageAlloc64Bit   = pageAlloc64Bit
   640  	PallocSumBytes   = pallocSumBytes
   641  )
   642  
   643  // Expose pallocSum for testing.
   644  type PallocSum pallocSum
   645  
   646  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
   647  func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
   648  func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
   649  func (m PallocSum) End() uint                      { return pallocSum(m).end() }
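
// pallocSumRoundTripExample is an illustrative sketch (not part of the
// upstream file; the function name is hypothetical): the three packed fields
// round-trip through the accessors. A summary encodes the leading free run
// (start), the longest free run (max), and the trailing free run (end) of a
// bitmap, in pages.
func pallocSumRoundTripExample() bool {
	s := PackPallocSum(1, 8, 3)
	return s.Start() == 1 && s.Max() == 8 && s.End() == 3
}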
   650  
   651  // Expose pallocBits for testing.
   652  type PallocBits pallocBits
   653  
   654  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
   655  	return (*pallocBits)(b).find(npages, searchIdx)
   656  }
   657  func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
   658  func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
   659  func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
   660  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
   661  
   662  // SummarizeSlow is a slow but more obviously correct implementation
   663  // of (*pallocBits).summarize. Used for testing.
   664  func SummarizeSlow(b *PallocBits) PallocSum {
   665  	var start, most, end uint
   666  
   667  	const N = uint(len(b)) * 64
   668  	for start < N && (*pageBits)(b).get(start) == 0 {
   669  		start++
   670  	}
   671  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
   672  		end++
   673  	}
   674  	run := uint(0)
   675  	for i := uint(0); i < N; i++ {
   676  		if (*pageBits)(b).get(i) == 0 {
   677  			run++
   678  		} else {
   679  			run = 0
   680  		}
   681  		most = max(most, run)
   682  	}
   683  	return PackPallocSum(start, most, end)
   684  }
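
// summarizeCrossCheckExample is an illustrative sketch (not part of the
// upstream file; the function name is hypothetical): tests can cross-check
// the optimized summarizer against the obviously-correct one on the same
// bitmap.
func summarizeCrossCheckExample(b *PallocBits) bool {
	return b.Summarize() == SummarizeSlow(b)
}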
   685  
   686  // Expose non-trivial helpers for testing.
   687  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
   688  
   689  // Given two PallocBits, returns a set of bit ranges where
   690  // they differ.
    691  // DiffPallocBits returns the set of bit ranges over which
    692  // a and b differ.
   693  	bb := (*pageBits)(b)
   694  
   695  	var d []BitRange
   696  	base, size := uint(0), uint(0)
   697  	for i := uint(0); i < uint(len(ba))*64; i++ {
   698  		if ba.get(i) != bb.get(i) {
   699  			if size == 0 {
   700  				base = i
   701  			}
   702  			size++
   703  		} else {
   704  			if size != 0 {
   705  				d = append(d, BitRange{base, size})
   706  			}
   707  			size = 0
   708  		}
   709  	}
   710  	if size != 0 {
   711  		d = append(d, BitRange{base, size})
   712  	}
   713  	return d
   714  }
   715  
   716  // StringifyPallocBits gets the bits in the bit range r from b,
   717  // and returns a string containing the bits as ASCII 0 and 1
   718  // characters.
   719  func StringifyPallocBits(b *PallocBits, r BitRange) string {
   720  	str := ""
   721  	for j := r.I; j < r.I+r.N; j++ {
   722  		if (*pageBits)(b).get(j) != 0 {
   723  			str += "1"
   724  		} else {
   725  			str += "0"
   726  		}
   727  	}
   728  	return str
   729  }
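
// diffReportExample is an illustrative sketch (not part of the upstream file;
// the function name is hypothetical): combining DiffPallocBits and
// StringifyPallocBits gives a readable report of exactly where two bitmaps
// disagree.
func diffReportExample(got, want *PallocBits) []string {
	var report []string
	for _, r := range DiffPallocBits(got, want) {
		report = append(report,
			"got  "+StringifyPallocBits(got, r),
			"want "+StringifyPallocBits(want, r))
	}
	return report
}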
   730  
   731  // Expose pallocData for testing.
   732  type PallocData pallocData
   733  
   734  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
   735  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
   736  }
   737  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
   738  func (d *PallocData) ScavengedSetRange(i, n uint) {
   739  	(*pallocData)(d).scavenged.setRange(i, n)
   740  }
   741  func (d *PallocData) PallocBits() *PallocBits {
   742  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
   743  }
   744  func (d *PallocData) Scavenged() *PallocBits {
   745  	return (*PallocBits)(&(*pallocData)(d).scavenged)
   746  }
   747  
   748  // Expose fillAligned for testing.
   749  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
   750  
   751  // Expose pageCache for testing.
   752  type PageCache pageCache
   753  
   754  const PageCachePages = pageCachePages
   755  
   756  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
   757  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
   758  }
   759  func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
   760  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
   761  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
   762  func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
   763  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
   764  	return (*pageCache)(c).alloc(npages)
   765  }
   766  func (c *PageCache) Flush(s *PageAlloc) {
   767  	cp := (*pageCache)(c)
   768  	sp := (*pageAlloc)(s)
   769  
   770  	systemstack(func() {
   771  		// None of the tests need any higher-level locking, so we just
   772  		// take the lock internally.
   773  		lock(sp.mheapLock)
   774  		cp.flush(sp)
   775  		unlock(sp.mheapLock)
   776  	})
   777  }
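
// pageCacheExample is an illustrative sketch (not part of the upstream file;
// the function name is hypothetical): a cache whose low two bits are set
// holds its first two pages, and Alloc hands them out one page at a time.
func pageCacheExample() {
	c := NewPageCache(PageBase(BaseChunkIdx, 0), 0x3, 0)
	addr, scav := c.Alloc(1) // lowest free page in the cache
	_, _ = addr, scav
	_ = c.Empty() // false: one page remains
}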
   778  
   779  // Expose chunk index type.
   780  type ChunkIdx chunkIdx
   781  
   782  // Expose pageAlloc for testing. Note that because pageAlloc is
    783  // not in the heap, neither is PageAlloc.
   784  type PageAlloc pageAlloc
   785  
   786  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
   787  	pp := (*pageAlloc)(p)
   788  
   789  	var addr, scav uintptr
   790  	systemstack(func() {
   791  		// None of the tests need any higher-level locking, so we just
   792  		// take the lock internally.
   793  		lock(pp.mheapLock)
   794  		addr, scav = pp.alloc(npages)
   795  		unlock(pp.mheapLock)
   796  	})
   797  	return addr, scav
   798  }
   799  func (p *PageAlloc) AllocToCache() PageCache {
   800  	pp := (*pageAlloc)(p)
   801  
   802  	var c PageCache
   803  	systemstack(func() {
   804  		// None of the tests need any higher-level locking, so we just
   805  		// take the lock internally.
   806  		lock(pp.mheapLock)
   807  		c = PageCache(pp.allocToCache())
   808  		unlock(pp.mheapLock)
   809  	})
   810  	return c
   811  }
   812  func (p *PageAlloc) Free(base, npages uintptr) {
   813  	pp := (*pageAlloc)(p)
   814  
   815  	systemstack(func() {
   816  		// None of the tests need any higher-level locking, so we just
   817  		// take the lock internally.
   818  		lock(pp.mheapLock)
   819  		pp.free(base, npages)
   820  		unlock(pp.mheapLock)
   821  	})
   822  }
   823  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
   824  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
   825  }
   826  func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
   827  	pp := (*pageAlloc)(p)
   828  	systemstack(func() {
   829  		r = pp.scavenge(nbytes, nil, true)
   830  	})
   831  	return
   832  }
   833  func (p *PageAlloc) InUse() []AddrRange {
   834  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
   835  	for _, r := range p.inUse.ranges {
   836  		ranges = append(ranges, AddrRange{r})
   837  	}
   838  	return ranges
   839  }
   840  
   841  // Returns nil if the PallocData's L2 is missing.
   842  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
   843  	ci := chunkIdx(i)
   844  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
   845  }
   846  
   847  // AddrRange is a wrapper around addrRange for testing.
   848  type AddrRange struct {
   849  	addrRange
   850  }
   851  
   852  // MakeAddrRange creates a new address range.
   853  func MakeAddrRange(base, limit uintptr) AddrRange {
   854  	return AddrRange{makeAddrRange(base, limit)}
   855  }
   856  
   857  // Base returns the virtual base address of the address range.
   858  func (a AddrRange) Base() uintptr {
   859  	return a.addrRange.base.addr()
   860  }
   861  
    862  // Limit returns the virtual limit address of the address range.
   863  func (a AddrRange) Limit() uintptr {
   864  	return a.addrRange.limit.addr()
   865  }
   866  
   867  // Equals returns true if the two address ranges are exactly equal.
   868  func (a AddrRange) Equals(b AddrRange) bool {
   869  	return a == b
   870  }
   871  
   872  // Size returns the size in bytes of the address range.
   873  func (a AddrRange) Size() uintptr {
   874  	return a.addrRange.size()
   875  }
   876  
   877  // testSysStat is the sysStat passed to test versions of various
    878  // runtime structures. We have to keep track of this because
    879  // otherwise memstats.mappedReady won't line up with other
    880  // stats in the runtime during tests.
   881  var testSysStat = &memstats.other_sys
   882  
   883  // AddrRanges is a wrapper around addrRanges for testing.
   884  type AddrRanges struct {
   885  	addrRanges
   886  	mutable bool
   887  }
   888  
   889  // NewAddrRanges creates a new empty addrRanges.
   890  //
   891  // Note that this initializes addrRanges just like in the
   892  // runtime, so its memory is persistentalloc'd. Call this
   893  // function sparingly since the memory it allocates is
   894  // leaked.
   895  //
   896  // This AddrRanges is mutable, so we can test methods like
   897  // Add.
   898  func NewAddrRanges() AddrRanges {
   899  	r := addrRanges{}
   900  	r.init(testSysStat)
   901  	return AddrRanges{r, true}
   902  }
   903  
   904  // MakeAddrRanges creates a new addrRanges populated with
   905  // the ranges in a.
   906  //
   907  // The returned AddrRanges is immutable, so methods like
   908  // Add will fail.
   909  func MakeAddrRanges(a ...AddrRange) AddrRanges {
   910  	// Methods that manipulate the backing store of addrRanges.ranges should
   911  	// not be used on the result from this function (e.g. add) since they may
   912  	// trigger reallocation. That would normally be fine, except the new
   913  	// backing store won't come from the heap, but from persistentalloc, so
   914  	// we'll leak some memory implicitly.
   915  	ranges := make([]addrRange, 0, len(a))
   916  	total := uintptr(0)
   917  	for _, r := range a {
   918  		ranges = append(ranges, r.addrRange)
   919  		total += r.Size()
   920  	}
   921  	return AddrRanges{addrRanges{
   922  		ranges:     ranges,
   923  		totalBytes: total,
   924  		sysStat:    testSysStat,
   925  	}, false}
   926  }
   927  
   928  // Ranges returns a copy of the ranges described by the
   929  // addrRanges.
   930  func (a *AddrRanges) Ranges() []AddrRange {
   931  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
   932  	for _, r := range a.addrRanges.ranges {
   933  		result = append(result, AddrRange{r})
   934  	}
   935  	return result
   936  }
   937  
   938  // FindSucc returns the successor to base. See addrRanges.findSucc
   939  // for more details.
   940  func (a *AddrRanges) FindSucc(base uintptr) int {
   941  	return a.findSucc(base)
   942  }
   943  
   944  // Add adds a new AddrRange to the AddrRanges.
   945  //
   946  // The AddrRange must be mutable (i.e. created by NewAddrRanges),
   947  // otherwise this method will throw.
   948  func (a *AddrRanges) Add(r AddrRange) {
   949  	if !a.mutable {
   950  		throw("attempt to mutate immutable AddrRanges")
   951  	}
   952  	a.add(r.addrRange)
   953  }
   954  
   955  // TotalBytes returns the totalBytes field of the addrRanges.
   956  func (a *AddrRanges) TotalBytes() uintptr {
   957  	return a.addrRanges.totalBytes
   958  }
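
// addrRangesExample is an illustrative sketch (not part of the upstream file;
// the function name is hypothetical): only an AddrRanges built by
// NewAddrRanges may be mutated; calling Add on a MakeAddrRanges result
// throws. Note that NewAddrRanges leaks persistentalloc'd memory, so use it
// sparingly.
func addrRangesExample() uintptr {
	a := NewAddrRanges()
	a.Add(MakeAddrRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 4)))
	return a.TotalBytes() // 4 pages' worth of bytes
}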
   959  
   960  // BitRange represents a range over a bitmap.
   961  type BitRange struct {
   962  	I, N uint // bit index and length in bits
   963  }
   964  
   965  // NewPageAlloc creates a new page allocator for testing and
   966  // initializes it with the scav and chunks maps. Each key in these maps
   967  // represents a chunk index and each value is a series of bit ranges to
   968  // set within each bitmap's chunk.
   969  //
   970  // The initialization of the pageAlloc preserves the invariant that if a
   971  // scavenged bit is set the alloc bit is necessarily unset, so some
   972  // of the bits described by scav may be cleared in the final bitmap if
   973  // ranges in chunks overlap with them.
   974  //
   975  // scav is optional, and if nil, the scavenged bitmap will be cleared
   976  // (as opposed to all 1s, which it usually is). Furthermore, every
    977  // chunk index in scav should also appear in chunks; those that do not
    978  // are ignored.
   979  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
   980  	p := new(pageAlloc)
   981  
   982  	// We've got an entry, so initialize the pageAlloc.
   983  	p.init(new(mutex), testSysStat, true)
   984  	lockInit(p.mheapLock, lockRankMheap)
   985  	for i, init := range chunks {
   986  		addr := chunkBase(chunkIdx(i))
   987  
   988  		// Mark the chunk's existence in the pageAlloc.
   989  		systemstack(func() {
   990  			lock(p.mheapLock)
   991  			p.grow(addr, pallocChunkBytes)
   992  			unlock(p.mheapLock)
   993  		})
   994  
   995  		// Initialize the bitmap and update pageAlloc metadata.
   996  		ci := chunkIndex(addr)
   997  		chunk := p.chunkOf(ci)
   998  
    999  		// Clear all the scavenged bits, which the grow call above set.
  1000  		chunk.scavenged.clearRange(0, pallocChunkPages)
  1001  
  1002  		// Simulate the allocation and subsequent free of all pages in
   1003  		// the chunk for the scavenge index. This leaves the index in a
   1004  		// state equivalent to all pages within the chunk being free.
  1005  		p.scav.index.alloc(ci, pallocChunkPages)
  1006  		p.scav.index.free(ci, 0, pallocChunkPages)
  1007  
  1008  		// Apply scavenge state if applicable.
  1009  		if scav != nil {
  1010  			if scvg, ok := scav[i]; ok {
  1011  				for _, s := range scvg {
  1012  					// Ignore the case of s.N == 0. setRange doesn't handle
  1013  					// it and it's a no-op anyway.
  1014  					if s.N != 0 {
  1015  						chunk.scavenged.setRange(s.I, s.N)
  1016  					}
  1017  				}
  1018  			}
  1019  		}
  1020  
  1021  		// Apply alloc state.
  1022  		for _, s := range init {
  1023  			// Ignore the case of s.N == 0. allocRange doesn't handle
  1024  			// it and it's a no-op anyway.
  1025  			if s.N != 0 {
  1026  				chunk.allocRange(s.I, s.N)
  1027  
  1028  				// Make sure the scavenge index is updated.
  1029  				p.scav.index.alloc(ci, s.N)
  1030  			}
  1031  		}
  1032  
  1033  		// Update heap metadata for the allocRange calls above.
  1034  		systemstack(func() {
  1035  			lock(p.mheapLock)
  1036  			p.update(addr, pallocChunkPages, false, false)
  1037  			unlock(p.mheapLock)
  1038  		})
  1039  	}
  1040  
  1041  	return (*PageAlloc)(p)
  1042  }
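
// newPageAllocExample is an illustrative sketch (not part of the upstream
// file; the function name is hypothetical): a test describes per-chunk state
// as bit ranges, here one chunk with its first 64 pages allocated and the
// remainder scavenged. FreePageAlloc must be called to release the OS
// resources.
func newPageAllocExample() {
	p := NewPageAlloc(
		map[ChunkIdx][]BitRange{BaseChunkIdx: {{0, 64}}},
		map[ChunkIdx][]BitRange{BaseChunkIdx: {{64, PallocChunkPages - 64}}})
	defer FreePageAlloc(p)
	addr, _ := p.Alloc(1) // first free page: page 64 of the base chunk
	_ = addr
}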
  1043  
  1044  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
  1045  // is called the pageAlloc may no longer be used. The object itself will be
  1046  // collected by the garbage collector once it is no longer live.
  1047  func FreePageAlloc(pp *PageAlloc) {
  1048  	p := (*pageAlloc)(pp)
  1049  
  1050  	// Free all the mapped space for the summary levels.
  1051  	if pageAlloc64Bit != 0 {
  1052  		for l := 0; l < summaryLevels; l++ {
  1053  			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
  1054  		}
  1055  	} else {
  1056  		resSize := uintptr(0)
  1057  		for _, s := range p.summary {
  1058  			resSize += uintptr(cap(s)) * pallocSumBytes
  1059  		}
  1060  		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
  1061  	}
  1062  
  1063  	// Free extra data structures.
  1064  	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
  1065  
  1066  	// Subtract back out whatever we mapped for the summaries.
  1067  	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
  1068  	// (and in anger should actually be accounted for), and there's no other
  1069  	// way to figure out how much we actually mapped.
  1070  	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
  1071  	testSysStat.add(-int64(p.summaryMappedReady))
  1072  
  1073  	// Free the mapped space for chunks.
  1074  	for i := range p.chunks {
  1075  		if x := p.chunks[i]; x != nil {
  1076  			p.chunks[i] = nil
  1077  			// This memory comes from sysAlloc and will always be page-aligned.
  1078  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
  1079  		}
  1080  	}
  1081  }
  1082  
  1083  // BaseChunkIdx is a convenient chunkIdx value which works on both
   1084  // 64-bit and 32-bit platforms, allowing the tests to share code
  1085  // between the two.
  1086  //
  1087  // This should not be higher than 0x100*pallocChunkBytes to support
  1088  // mips and mipsle, which only have 31-bit address spaces.
  1089  var BaseChunkIdx = func() ChunkIdx {
  1090  	var prefix uintptr
  1091  	if pageAlloc64Bit != 0 {
  1092  		prefix = 0xc000
  1093  	} else {
  1094  		prefix = 0x100
  1095  	}
  1096  	baseAddr := prefix * pallocChunkBytes
  1097  	if goos.IsAix != 0 {
  1098  		baseAddr += arenaBaseOffset
  1099  	}
  1100  	return ChunkIdx(chunkIndex(baseAddr))
  1101  }()
  1102  
  1103  // PageBase returns an address given a chunk index and a page index
  1104  // relative to that chunk.
  1105  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
  1106  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
  1107  }
  1108  
  1109  type BitsMismatch struct {
  1110  	Base      uintptr
  1111  	Got, Want uint64
  1112  }
  1113  
  1114  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
  1115  	ok = true
  1116  
  1117  	// Run on the system stack to avoid stack growth allocation.
  1118  	systemstack(func() {
  1119  		getg().m.mallocing++
  1120  
  1121  		// Lock so that we can safely access the bitmap.
  1122  		lock(&mheap_.lock)
  1123  
  1124  	chunkLoop:
  1125  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
  1126  			chunk := mheap_.pages.tryChunkOf(i)
  1127  			if chunk == nil {
  1128  				continue
  1129  			}
  1130  			cb := chunkBase(i)
  1131  			for j := 0; j < pallocChunkPages/64; j++ {
  1132  				// Run over each 64-bit bitmap section and ensure
  1133  				// scavenged is being cleared properly on allocation.
  1134  				// If a used bit and scavenged bit are both set, that's
  1135  				// an error, and could indicate a larger problem, or
  1136  				// an accounting problem.
  1137  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
  1138  				got := chunk.scavenged[j]
  1139  				if want != got {
  1140  					ok = false
  1141  					if n >= len(mismatches) {
  1142  						break chunkLoop
  1143  					}
  1144  					mismatches[n] = BitsMismatch{
  1145  						Base: cb + uintptr(j)*64*pageSize,
  1146  						Got:  got,
  1147  						Want: want,
  1148  					}
  1149  					n++
  1150  				}
  1151  			}
  1152  		}
  1153  		unlock(&mheap_.lock)
  1154  
  1155  		getg().m.mallocing--
  1156  	})
  1157  
  1158  	if randomizeHeapBase && len(mismatches) > 0 {
   1159  		// When goexperiment.RandomizedHeapBase64 is set, we use a series of
   1160  		// padding pages to generate a randomized heap base address; these pages
   1161  		// have both the alloc and scav bits set. Because of this we expect
   1162  		// exactly one arena to have mismatches, so check for that explicitly and
  1163  		// remove the mismatches if that property holds. If we see more than one
  1164  		// arena with this property, that is an indication something has
  1165  		// actually gone wrong, so return the mismatches.
  1166  		//
  1167  		// We do this, instead of ignoring the mismatches in the chunkLoop, because
  1168  		// it's not easy to determine which arena we added the padding pages to
  1169  		// programmatically, without explicitly recording the base address somewhere
  1170  		// in a global variable (which we'd rather not do as the address of that variable
  1171  		// is likely to be somewhat predictable, potentially defeating the purpose
  1172  		// of our randomization).
  1173  		affectedArenas := map[arenaIdx]bool{}
  1174  		for _, mismatch := range mismatches {
  1175  			if mismatch.Base > 0 {
  1176  				affectedArenas[arenaIndex(mismatch.Base)] = true
  1177  			}
  1178  		}
  1179  		if len(affectedArenas) == 1 {
  1180  			ok = true
  1181  			// zero the mismatches
  1182  			for i := range n {
  1183  				mismatches[i] = BitsMismatch{}
  1184  			}
  1185  		}
  1186  	}
  1187  
  1188  	return
  1189  }
  1190  
  1191  func PageCachePagesLeaked() (leaked uintptr) {
  1192  	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
  1193  
  1194  	// Walk over destroyed Ps and look for unflushed caches.
  1195  	deadp := allp[len(allp):cap(allp)]
  1196  	for _, p := range deadp {
  1197  		// Since we're going past len(allp) we may see nil Ps.
  1198  		// Just ignore them.
  1199  		if p != nil {
  1200  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
  1201  		}
  1202  	}
  1203  
  1204  	startTheWorld(stw)
  1205  	return
  1206  }
  1207  
  1208  var ProcYield = procyield
  1209  var OSYield = osyield
  1210  
  1211  type Mutex = mutex
  1212  
  1213  var Lock = lock
  1214  var Unlock = unlock
  1215  
  1216  var MutexContended = mutexContended
  1217  
  1218  func SemRootLock(addr *uint32) *mutex {
  1219  	root := semtable.rootFor(addr)
  1220  	return &root.lock
  1221  }
  1222  
  1223  var Semacquire = semacquire
  1224  var Semrelease1 = semrelease1
  1225  
  1226  func SemNwait(addr *uint32) uint32 {
  1227  	root := semtable.rootFor(addr)
  1228  	return root.nwait.Load()
  1229  }
  1230  
  1231  const SemTableSize = semTabSize
  1232  
  1233  // SemTable is a wrapper around semTable exported for testing.
  1234  type SemTable struct {
  1235  	semTable
  1236  }
  1237  
  1238  // Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
  1239  func (t *SemTable) Enqueue(addr *uint32) {
  1240  	s := acquireSudog()
  1241  	s.releasetime = 0
  1242  	s.acquiretime = 0
  1243  	s.ticket = 0
  1244  	t.semTable.rootFor(addr).queue(addr, s, false)
  1245  }
  1246  
  1247  // Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
  1248  //
  1249  // Returns true if there actually was a waiter to be dequeued.
  1250  func (t *SemTable) Dequeue(addr *uint32) bool {
  1251  	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
  1252  	if s != nil {
  1253  		releaseSudog(s)
  1254  		return true
  1255  	}
  1256  	return false
  1257  }
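
// semTableExample is an illustrative sketch (not part of the upstream file;
// the function name is hypothetical): a semaphore is identified purely by its
// address, and each Enqueue is matched by at most one successful Dequeue.
func semTableExample() bool {
	var t SemTable
	sema := Escape(new(uint32)) // escape so the address is stable
	t.Enqueue(sema)
	return t.Dequeue(sema) && !t.Dequeue(sema)
}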
  1258  
  1259  // mspan wrapper for testing.
  1260  type MSpan mspan
  1261  
  1262  // Allocate an mspan for testing.
  1263  func AllocMSpan() *MSpan {
  1264  	var s *mspan
  1265  	systemstack(func() {
  1266  		lock(&mheap_.lock)
  1267  		s = (*mspan)(mheap_.spanalloc.alloc())
  1268  		s.init(0, 0)
  1269  		unlock(&mheap_.lock)
  1270  	})
  1271  	return (*MSpan)(s)
  1272  }
  1273  
  1274  // Free an allocated mspan.
  1275  func FreeMSpan(s *MSpan) {
  1276  	systemstack(func() {
  1277  		lock(&mheap_.lock)
  1278  		mheap_.spanalloc.free(unsafe.Pointer(s))
  1279  		unlock(&mheap_.lock)
  1280  	})
  1281  }
  1282  
  1283  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
  1284  	s := (*mspan)(ms)
  1285  	s.nelems = uint16(len(bits) * 8)
  1286  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
  1287  	result := s.countAlloc()
  1288  	s.gcmarkBits = nil
  1289  	return result
  1290  }
  1291  
  1292  type MSpanQueue mSpanQueue
  1293  
  1294  func (q *MSpanQueue) Size() int {
  1295  	return (*mSpanQueue)(q).n
  1296  }
  1297  
  1298  func (q *MSpanQueue) Push(s *MSpan) {
  1299  	(*mSpanQueue)(q).push((*mspan)(s))
  1300  }
  1301  
  1302  func (q *MSpanQueue) Pop() *MSpan {
  1303  	s := (*mSpanQueue)(q).pop()
  1304  	return (*MSpan)(s)
  1305  }
  1306  
  1307  func (q *MSpanQueue) TakeAll(p *MSpanQueue) {
  1308  	(*mSpanQueue)(q).takeAll((*mSpanQueue)(p))
  1309  }
  1310  
  1311  func (q *MSpanQueue) PopN(n int) MSpanQueue {
  1312  	p := (*mSpanQueue)(q).popN(n)
  1313  	return (MSpanQueue)(p)
  1314  }
  1315  
  1316  const (
  1317  	TimeHistSubBucketBits = timeHistSubBucketBits
  1318  	TimeHistNumSubBuckets = timeHistNumSubBuckets
  1319  	TimeHistNumBuckets    = timeHistNumBuckets
  1320  	TimeHistMinBucketBits = timeHistMinBucketBits
  1321  	TimeHistMaxBucketBits = timeHistMaxBucketBits
  1322  )
  1323  
  1324  type TimeHistogram timeHistogram
  1325  
   1326  // Count returns the count for the given bucket and subBucket indices.
   1327  // It returns true if those indices were in range; otherwise it returns
   1328  // the underflow bucket's count if bucket < 0, or the overflow bucket's
   1329  // count if the index is too large, and false.
  1330  func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
  1331  	t := (*timeHistogram)(th)
  1332  	if bucket < 0 {
  1333  		return t.underflow.Load(), false
  1334  	}
  1335  	i := bucket*TimeHistNumSubBuckets + subBucket
  1336  	if i >= len(t.counts) {
  1337  		return t.overflow.Load(), false
  1338  	}
  1339  	return t.counts[i].Load(), true
  1340  }
  1341  
  1342  func (th *TimeHistogram) Record(duration int64) {
  1343  	(*timeHistogram)(th).record(duration)
  1344  }
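
// timeHistogramExample is an illustrative sketch (not part of the upstream
// file; the function name is hypothetical): record a duration, then read
// counts back. Out-of-range indices report the underflow or overflow totals
// with ok == false.
func timeHistogramExample() {
	var h TimeHistogram
	h.Record(1000) // 1µs, expressed in nanoseconds
	if n, ok := h.Count(0, 0); ok {
		_ = n // count in the first bucket's first sub-bucket
	}
	underflow, ok := h.Count(-1, 0) // ok == false: this is the underflow total
	_, _ = underflow, ok
}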
  1345  
  1346  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
  1347  
  1348  func SetIntArgRegs(a int) int {
  1349  	lock(&finlock)
  1350  	old := intArgRegs
  1351  	if a >= 0 {
  1352  		intArgRegs = a
  1353  	}
  1354  	unlock(&finlock)
  1355  	return old
  1356  }
  1357  
  1358  func FinalizerGAsleep() bool {
  1359  	return fingStatus.Load()&fingWait != 0
  1360  }
  1361  
  1362  // For GCTestMoveStackOnNextCall, it's important not to introduce an
  1363  // extra layer of call, since then there's a return before the "real"
  1364  // next call.
  1365  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
  1366  
  1367  // For GCTestIsReachable, it's important that we do this as a call so
  1368  // escape analysis can see through it.
  1369  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
  1370  	return gcTestIsReachable(ptrs...)
  1371  }
  1372  
  1373  // For GCTestPointerClass, it's important that we do this as a call so
  1374  // escape analysis can see through it.
  1375  //
  1376  // This is nosplit because gcTestPointerClass is.
  1377  //
  1378  //go:nosplit
  1379  func GCTestPointerClass(p unsafe.Pointer) string {
  1380  	return gcTestPointerClass(p)
  1381  }
  1382  
  1383  const Raceenabled = raceenabled
  1384  
  1385  const (
  1386  	GCBackgroundUtilization            = gcBackgroundUtilization
  1387  	GCGoalUtilization                  = gcGoalUtilization
  1388  	DefaultHeapMinimum                 = defaultHeapMinimum
  1389  	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
  1390  	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
  1391  )
  1392  
  1393  type GCController struct {
  1394  	gcControllerState
  1395  }
  1396  
  1397  func NewGCController(gcPercent int, memoryLimit int64) *GCController {
  1398  	// Force the controller to escape. We're going to
  1399  	// do 64-bit atomics on it, and if it gets stack-allocated
  1400  	// on a 32-bit architecture, it may get allocated unaligned
  1401  	// space.
  1402  	g := Escape(new(GCController))
  1403  	g.gcControllerState.test = true // Mark it as a test copy.
  1404  	g.init(int32(gcPercent), memoryLimit)
  1405  	return g
  1406  }
  1407  
  1408  func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
  1409  	trigger, _ := c.trigger()
  1410  	if c.heapMarked > trigger {
  1411  		trigger = c.heapMarked
  1412  	}
  1413  	c.maxStackScan.Store(stackSize)
  1414  	c.globalsScan.Store(globalsSize)
  1415  	c.heapLive.Store(trigger)
  1416  	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
  1417  	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
  1418  }
  1419  
  1420  func (c *GCController) AssistWorkPerByte() float64 {
  1421  	return c.assistWorkPerByte.Load()
  1422  }
  1423  
  1424  func (c *GCController) HeapGoal() uint64 {
  1425  	return c.heapGoal()
  1426  }
  1427  
  1428  func (c *GCController) HeapLive() uint64 {
  1429  	return c.heapLive.Load()
  1430  }
  1431  
  1432  func (c *GCController) HeapMarked() uint64 {
  1433  	return c.heapMarked
  1434  }
  1435  
  1436  func (c *GCController) Triggered() uint64 {
  1437  	return c.triggered
  1438  }
  1439  
  1440  type GCControllerReviseDelta struct {
  1441  	HeapLive        int64
  1442  	HeapScan        int64
  1443  	HeapScanWork    int64
  1444  	StackScanWork   int64
  1445  	GlobalsScanWork int64
  1446  }
  1447  
  1448  func (c *GCController) Revise(d GCControllerReviseDelta) {
  1449  	c.heapLive.Add(d.HeapLive)
  1450  	c.heapScan.Add(d.HeapScan)
  1451  	c.heapScanWork.Add(d.HeapScanWork)
  1452  	c.stackScanWork.Add(d.StackScanWork)
  1453  	c.globalsScanWork.Add(d.GlobalsScanWork)
  1454  	c.revise()
  1455  }
  1456  
  1457  func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
  1458  	c.assistTime.Store(assistTime)
  1459  	c.endCycle(elapsed, gomaxprocs, false)
  1460  	c.resetLive(bytesMarked)
  1461  	c.commit(false)
  1462  }
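
// gcControllerCycleExample is an illustrative sketch (not part of the
// upstream file; the function name and all numbers are hypothetical) of the
// cycle lifecycle these wrappers expose: build a test controller, start a
// cycle, feed it work via Revise, then end the cycle with the marked bytes.
func gcControllerCycleExample() {
	c := NewGCController(100, 1<<62 /* effectively no memory limit */)
	c.StartCycle(1<<20, 1<<20, 0.5, 4)
	c.Revise(GCControllerReviseDelta{HeapLive: 1 << 20, HeapScan: 1 << 19})
	c.EndCycle(1<<21, 0, 1e9 /* 1s */, 4)
}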
  1463  
  1464  func (c *GCController) AddIdleMarkWorker() bool {
  1465  	return c.addIdleMarkWorker()
  1466  }
  1467  
  1468  func (c *GCController) NeedIdleMarkWorker() bool {
  1469  	return c.needIdleMarkWorker()
  1470  }
  1471  
  1472  func (c *GCController) RemoveIdleMarkWorker() {
  1473  	c.removeIdleMarkWorker()
  1474  }
  1475  
  1476  func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
  1477  	c.setMaxIdleMarkWorkers(max)
  1478  }
  1479  
  1480  var alwaysFalse bool
  1481  var escapeSink any
  1482  
  1483  func Escape[T any](x T) T {
  1484  	if alwaysFalse {
  1485  		escapeSink = x
  1486  	}
  1487  	return x
  1488  }
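
// escapeExample is an illustrative sketch (not part of the upstream file; the
// function name is hypothetical): Escape launders a value through a
// never-taken assignment to a global, so escape analysis must assume it leaks
// and heap-allocates it. The scheduler queue tests above rely on this to keep
// g structs from moving while raw guintptrs point at them.
func escapeExample() *G {
	gs := make([]G, 1)
	Escape(gs) // force gs to the heap; its address is now stable
	return &gs[0]
}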
  1489  
  1490  // Acquirem blocks preemption.
  1491  func Acquirem() {
  1492  	acquirem()
  1493  }
  1494  
  1495  func Releasem() {
  1496  	releasem(getg().m)
  1497  }
  1498  
  1499  var Timediv = timediv
  1500  
  1501  type PIController struct {
  1502  	piController
  1503  }
  1504  
  1505  func NewPIController(kp, ti, tt, min, max float64) *PIController {
  1506  	return &PIController{piController{
  1507  		kp:  kp,
  1508  		ti:  ti,
  1509  		tt:  tt,
  1510  		min: min,
  1511  		max: max,
  1512  	}}
  1513  }
  1514  
  1515  func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
  1516  	return c.piController.next(input, setpoint, period)
  1517  }
  1518  
  1519  const (
  1520  	CapacityPerProc          = capacityPerProc
  1521  	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
  1522  )
  1523  
  1524  type GCCPULimiter struct {
  1525  	limiter gcCPULimiterState
  1526  }
  1527  
  1528  func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
  1529  	// Force the controller to escape. We're going to
  1530  	// do 64-bit atomics on it, and if it gets stack-allocated
  1531  	// on a 32-bit architecture, it may get allocated unaligned
  1532  	// space.
  1533  	l := Escape(new(GCCPULimiter))
  1534  	l.limiter.test = true
  1535  	l.limiter.resetCapacity(now, gomaxprocs)
  1536  	return l
  1537  }
  1538  
  1539  func (l *GCCPULimiter) Fill() uint64 {
  1540  	return l.limiter.bucket.fill
  1541  }
  1542  
  1543  func (l *GCCPULimiter) Capacity() uint64 {
  1544  	return l.limiter.bucket.capacity
  1545  }
  1546  
  1547  func (l *GCCPULimiter) Overflow() uint64 {
  1548  	return l.limiter.overflow
  1549  }
  1550  
  1551  func (l *GCCPULimiter) Limiting() bool {
  1552  	return l.limiter.limiting()
  1553  }
  1554  
  1555  func (l *GCCPULimiter) NeedUpdate(now int64) bool {
  1556  	return l.limiter.needUpdate(now)
  1557  }
  1558  
  1559  func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
  1560  	l.limiter.startGCTransition(enableGC, now)
  1561  }
  1562  
  1563  func (l *GCCPULimiter) FinishGCTransition(now int64) {
  1564  	l.limiter.finishGCTransition(now)
  1565  }
  1566  
  1567  func (l *GCCPULimiter) Update(now int64) {
  1568  	l.limiter.update(now)
  1569  }
  1570  
  1571  func (l *GCCPULimiter) AddAssistTime(t int64) {
  1572  	l.limiter.addAssistTime(t)
  1573  }
  1574  
  1575  func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
  1576  	l.limiter.resetCapacity(now, nprocs)
  1577  }
  1578  
  1579  const ScavengePercent = scavengePercent
  1580  
  1581  type Scavenger struct {
  1582  	Sleep      func(int64) int64
  1583  	Scavenge   func(uintptr) (uintptr, int64)
  1584  	ShouldStop func() bool
  1585  	GoMaxProcs func() int32
  1586  
  1587  	released  atomic.Uintptr
  1588  	scavenger scavengerState
  1589  	stop      chan<- struct{}
  1590  	done      <-chan struct{}
  1591  }
  1592  
  1593  func (s *Scavenger) Start() {
  1594  	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
  1595  		panic("must populate all stubs")
  1596  	}
  1597  
  1598  	// Install hooks.
  1599  	s.scavenger.sleepStub = s.Sleep
  1600  	s.scavenger.scavenge = s.Scavenge
  1601  	s.scavenger.shouldStop = s.ShouldStop
  1602  	s.scavenger.gomaxprocs = s.GoMaxProcs
  1603  
   1604  	// Start up the scavenger goroutine, and wait for it to be ready.
  1605  	stop := make(chan struct{})
  1606  	s.stop = stop
  1607  	done := make(chan struct{})
  1608  	s.done = done
  1609  	go func() {
  1610  		// This should match bgscavenge, loosely.
  1611  		s.scavenger.init()
  1612  		s.scavenger.park()
  1613  		for {
  1614  			select {
  1615  			case <-stop:
  1616  				close(done)
  1617  				return
  1618  			default:
  1619  			}
  1620  			released, workTime := s.scavenger.run()
  1621  			if released == 0 {
  1622  				s.scavenger.park()
  1623  				continue
  1624  			}
  1625  			s.released.Add(released)
  1626  			s.scavenger.sleep(workTime)
  1627  		}
  1628  	}()
  1629  	if !s.BlockUntilParked(1e9 /* 1 second */) {
  1630  		panic("timed out waiting for scavenger to get ready")
  1631  	}
  1632  }
  1633  
  1634  // BlockUntilParked blocks until the scavenger parks, or until
  1635  // timeout is exceeded. Returns true if the scavenger parked.
  1636  //
  1637  // Note that in testing, parked means something slightly different.
  1638  // In anger, the scavenger parks to sleep, too, but in testing,
  1639  // it only parks when it actually has no work to do.
  1640  func (s *Scavenger) BlockUntilParked(timeout int64) bool {
  1641  	// Just spin, waiting for it to park.
  1642  	//
  1643  	// The actual parking process is racy with respect to
  1644  	// wakeups, which is fine, but for testing we need something
  1645  	// a bit more robust.
  1646  	start := nanotime()
  1647  	for nanotime()-start < timeout {
  1648  		lock(&s.scavenger.lock)
  1649  		parked := s.scavenger.parked
  1650  		unlock(&s.scavenger.lock)
  1651  		if parked {
  1652  			return true
  1653  		}
  1654  		Gosched()
  1655  	}
  1656  	return false
  1657  }
  1658  
  1659  // Released returns how many bytes the scavenger released.
  1660  func (s *Scavenger) Released() uintptr {
  1661  	return s.released.Load()
  1662  }
  1663  
  1664  // Wake wakes up a parked scavenger to keep running.
  1665  func (s *Scavenger) Wake() {
  1666  	s.scavenger.wake()
  1667  }
  1668  
  1669  // Stop cleans up the scavenger's resources. The scavenger
  1670  // must be parked for this to work.
  1671  func (s *Scavenger) Stop() {
  1672  	lock(&s.scavenger.lock)
  1673  	parked := s.scavenger.parked
  1674  	unlock(&s.scavenger.lock)
  1675  	if !parked {
  1676  		panic("tried to clean up scavenger that is not parked")
  1677  	}
  1678  	close(s.stop)
  1679  	s.Wake()
  1680  	<-s.done
  1681  }
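
// scavengerStubExample is an illustrative sketch (not part of the upstream
// file; the function name is hypothetical): a test populates all four stubs
// before Start, drives the scavenger, and shuts it down. With no work
// reported, each wakeup parks again immediately.
func scavengerStubExample() {
	s := &Scavenger{
		Sleep:      func(ns int64) int64 { return ns },
		Scavenge:   func(n uintptr) (uintptr, int64) { return 0, 0 }, // no work available
		ShouldStop: func() bool { return true },
		GoMaxProcs: func() int32 { return 1 },
	}
	s.Start() // installs the stubs and waits for the goroutine to park
	s.Wake()  // one run; nothing released, so it parks again
	if !s.BlockUntilParked(1e9 /* 1 second */) {
		panic("scavenger did not park")
	}
	s.Stop()
}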
  1682  
  1683  type ScavengeIndex struct {
  1684  	i scavengeIndex
  1685  }
  1686  
  1687  func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
  1688  	s := new(ScavengeIndex)
   1689  	// This is a bit lazy, but it easily guarantees we'll be able
  1690  	// to reference all the relevant chunks. The worst-case
  1691  	// memory usage here is 512 MiB, but tests generally use
  1692  	// small offsets from BaseChunkIdx, which results in ~100s
  1693  	// of KiB in memory use.
  1694  	//
  1695  	// This may still be worth making better, at least by sharing
  1696  	// this fairly large array across calls with a sync.Pool or
  1697  	// something. Currently, when the tests are run serially,
  1698  	// it takes around 0.5s. Not all that much, but if we have
  1699  	// a lot of tests like this it could add up.
  1700  	s.i.chunks = make([]atomicScavChunkData, max)
  1701  	s.i.min.Store(uintptr(min))
  1702  	s.i.max.Store(uintptr(max))
  1703  	s.i.minHeapIdx.Store(uintptr(min))
  1704  	s.i.test = true
  1705  	return s
  1706  }
  1707  
  1708  func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
  1709  	ci, off := s.i.find(force)
  1710  	return ChunkIdx(ci), off
  1711  }
  1712  
  1713  func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
  1714  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1715  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1716  
  1717  	if sc == ec {
  1718  		// The range doesn't cross any chunk boundaries.
  1719  		s.i.alloc(sc, ei+1-si)
  1720  	} else {
  1721  		// The range crosses at least one chunk boundary.
  1722  		s.i.alloc(sc, pallocChunkPages-si)
  1723  		for c := sc + 1; c < ec; c++ {
  1724  			s.i.alloc(c, pallocChunkPages)
  1725  		}
  1726  		s.i.alloc(ec, ei+1)
  1727  	}
  1728  }
  1729  
  1730  func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
  1731  	sc, ec := chunkIndex(base), chunkIndex(limit-1)
  1732  	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
  1733  
  1734  	if sc == ec {
  1735  		// The range doesn't cross any chunk boundaries.
  1736  		s.i.free(sc, si, ei+1-si)
  1737  	} else {
  1738  		// The range crosses at least one chunk boundary.
  1739  		s.i.free(sc, si, pallocChunkPages-si)
  1740  		for c := sc + 1; c < ec; c++ {
  1741  			s.i.free(c, 0, pallocChunkPages)
  1742  		}
  1743  		s.i.free(ec, 0, ei+1)
  1744  	}
  1745  }
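        // Worked example of the splitting in AllocRange and FreeRange above, assuming
        // pallocChunkPages is 512 (its value on current platforms): a range whose base
        // lies on page 510 of chunk 5 and whose last byte lies on page 2 of chunk 7
        // gives sc=5, ec=7, si=510, ei=2, so FreeRange issues free(5, 510, 2),
        // free(6, 0, 512), and free(7, 0, 3); AllocRange makes the same three-way
        // split with alloc(5, 2), alloc(6, 512), and alloc(7, 3).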
  1746  
  1747  func (s *ScavengeIndex) ResetSearchAddrs() {
  1748  	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
  1749  		addr, marked := a.Load()
  1750  		if marked {
  1751  			a.StoreUnmark(addr, addr)
  1752  		}
  1753  		a.Clear()
  1754  	}
  1755  	s.i.freeHWM = minOffAddr
  1756  }
  1757  
  1758  func (s *ScavengeIndex) NextGen() {
  1759  	s.i.nextGen()
  1760  }
  1761  
  1762  func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
  1763  	s.i.setEmpty(chunkIdx(ci))
  1764  }
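        // Editorial usage sketch: a test builds an index over a small window of chunks,
        // marks pages allocated and freed, and then asks where to scavenge. The use of
        // BaseChunkIdx (mentioned in the NewScavengeIndex comment) and the 4 MiB range
        // are assumptions for illustration; base stands for some address inside the
        // indexed chunks.
        //
        //	idx := runtime.NewScavengeIndex(runtime.BaseChunkIdx, runtime.BaseChunkIdx+64)
        //	idx.AllocRange(base, base+4<<20) // mark a 4 MiB range allocated
        //	idx.FreeRange(base, base+4<<20)  // ...then free it again
        //	idx.NextGen()                    // advance to the next scavenge generation
        //	ci, off := idx.Find(true)        // where should a forced scavenge look next?
        //	_, _ = ci, off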
  1765  
  1766  func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
  1767  	sc0 := scavChunkData{
  1768  		gen:            gen,
  1769  		inUse:          inUse,
  1770  		lastInUse:      lastInUse,
  1771  		scavChunkFlags: scavChunkFlags(flags),
  1772  	}
  1773  	scp := sc0.pack()
  1774  	sc1 := unpackScavChunkData(scp)
  1775  	return sc0 == sc1
  1776  }
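        // For example, a table-driven test could feed arbitrary field values through
        // the round trip (the numbers below are arbitrary, not special constants):
        //
        //	if !runtime.CheckPackScavChunkData(12, 128, 256, 0x2) {
        //		t.Fatal("scavChunkData did not survive a pack/unpack round trip")
        //	}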
  1777  
  1778  const GTrackingPeriod = gTrackingPeriod
  1779  
  1780  var ZeroBase = unsafe.Pointer(&zerobase)
  1781  
  1782  const UserArenaChunkBytes = userArenaChunkBytes
  1783  
  1784  type UserArena struct {
  1785  	arena *userArena
  1786  }
  1787  
  1788  func NewUserArena() *UserArena {
  1789  	return &UserArena{newUserArena()}
  1790  }
  1791  
  1792  func (a *UserArena) New(out *any) {
  1793  	i := efaceOf(out)
  1794  	typ := i._type
  1795  	if typ.Kind() != abi.Pointer {
  1796  		panic("new result of non-ptr type")
  1797  	}
  1798  	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
  1799  	i.data = a.arena.new(typ)
  1800  }
  1801  
  1802  func (a *UserArena) Slice(sl any, cap int) {
  1803  	a.arena.slice(sl, cap)
  1804  }
  1805  
  1806  func (a *UserArena) Free() {
  1807  	a.arena.free()
  1808  }
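        // Editorial usage sketch: New is assumed (from the kind check above) to require
        // an interface holding a pointer type, and Slice to take a pointer to the slice
        // to populate; the concrete types and sizes are illustrative.
        //
        //	a := runtime.NewUserArena()
        //	var x any = (*[16]byte)(nil)
        //	a.New(&x)        // x now holds a *[16]byte backed by arena memory
        //	var bs []byte
        //	a.Slice(&bs, 64) // bs becomes a slice of capacity 64 backed by the arena
        //	a.Free()         // release the arena; x and bs must not be used afterwards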
  1809  
  1810  func GlobalWaitingArenaChunks() int {
  1811  	n := 0
  1812  	systemstack(func() {
  1813  		lock(&mheap_.lock)
  1814  		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
  1815  			n++
  1816  		}
  1817  		unlock(&mheap_.lock)
  1818  	})
  1819  	return n
  1820  }
  1821  
  1822  func UserArenaClone[T any](s T) T {
  1823  	return arena_heapify(s).(T)
  1824  }
  1825  
  1826  var AlignUp = alignUp
  1827  
  1828  func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
  1829  	return blockUntilEmptyFinalizerQueue(timeout)
  1830  }
  1831  
  1832  func BlockUntilEmptyCleanupQueue(timeout int64) bool {
  1833  	return gcCleanups.blockUntilEmpty(timeout)
  1834  }
  1835  
  1836  func FrameStartLine(f *Frame) int {
  1837  	return f.startLine
  1838  }
  1839  
  1840  // PersistentAlloc allocates some memory that lives outside the Go heap.
  1841  // This memory will never be freed; use sparingly.
  1842  func PersistentAlloc(n, align uintptr) unsafe.Pointer {
  1843  	return persistentalloc(n, align, &memstats.other_sys)
  1844  }
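        // For instance, a test that needs a small off-heap buffer the GC will never
        // observe or move might do (size and alignment are arbitrary examples):
        //
        //	p := runtime.PersistentAlloc(64, 8) // 64 bytes, 8-byte aligned, never freed
        //	_ = p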
  1845  
  1846  const TagAlign = tagAlign
  1847  
  1848  // FPCallers works like Callers and uses frame pointer unwinding to populate
  1849  // pcBuf with the return addresses of the physical frames on the stack.
  1850  func FPCallers(pcBuf []uintptr) int {
  1851  	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
  1852  }
  1853  
  1854  const FramePointerEnabled = framepointer_enabled
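        // Editorial sketch of typical use: size the buffer generously and truncate to
        // the number of PCs actually recorded; tests would normally guard this on
        // FramePointerEnabled, since frame pointer unwinding is not available everywhere.
        //
        //	if runtime.FramePointerEnabled {
        //		pcs := make([]uintptr, 64)
        //		pcs = pcs[:runtime.FPCallers(pcs)]
        //	}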
  1855  
  1856  var (
  1857  	IsPinned      = isPinned
  1858  	GetPinCounter = pinnerGetPinCounter
  1859  )
  1860  
  1861  func SetPinnerLeakPanic(f func()) {
  1862  	pinnerLeakPanic = f
  1863  }
  1864  func GetPinnerLeakPanic() func() {
  1865  	return pinnerLeakPanic
  1866  }
  1867  
  1868  var testUintptr uintptr
  1869  
  1870  func MyGenericFunc[T any]() {
  1871  	systemstack(func() {
  1872  		testUintptr = 4
  1873  	})
  1874  }
  1875  
  1876  func UnsafePoint(pc uintptr) bool {
  1877  	fi := findfunc(pc)
  1878  	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
  1879  	switch v {
  1880  	case abi.UnsafePointUnsafe:
  1881  		return true
  1882  	case abi.UnsafePointSafe:
  1883  		return false
  1884  	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
  1885  		// These are all interruptible; they just encode a nonstandard
  1886  		// way of recovering when interrupted.
  1887  		return false
  1888  	default:
  1889  		var buf [20]byte
  1890  		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
  1891  	}
  1892  }
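        // A rough example of consulting this from a test; the choice of PC is an
        // assumption for illustration (any PC inside a Go function resolvable by
        // findfunc works, here a function's entry PC obtained via reflect):
        //
        //	pc := reflect.ValueOf(runtime.GC).Pointer()
        //	if runtime.UnsafePoint(pc) {
        //		// the entry instruction happens to be an unsafe point
        //	}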
  1893  
  1894  type TraceMap struct {
  1895  	traceMap
  1896  }
  1897  
  1898  func (m *TraceMap) PutString(s string) (uint64, bool) {
  1899  	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
  1900  }
  1901  
  1902  func (m *TraceMap) Reset() {
  1903  	m.traceMap.reset()
  1904  }
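        // Editorial sketch: the zero value is assumed ready to use, and the boolean is
        // assumed to report whether the string was newly inserted rather than found.
        //
        //	var m runtime.TraceMap
        //	id1, _ := m.PutString("hello")
        //	id2, _ := m.PutString("hello") // deduplicated: id2 == id1
        //	m.Reset()                      // drop all entries
        //	_, _ = id1, id2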
  1905  
  1906  func SetSpinInGCMarkDone(spin bool) {
  1907  	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
  1908  }
  1909  
  1910  func GCMarkDoneRestarted() bool {
  1911  	// Only read this outside of the GC. If we're running during a GC, just report false.
  1912  	mp := acquirem()
  1913  	if gcphase != _GCoff {
  1914  		releasem(mp)
  1915  		return false
  1916  	}
  1917  	restarted := gcDebugMarkDone.restartedDueTo27993
  1918  	releasem(mp)
  1919  	return restarted
  1920  }
  1921  
  1922  func GCMarkDoneResetRestartFlag() {
  1923  	mp := acquirem()
  1924  	for gcphase != _GCoff {
  1925  		releasem(mp)
  1926  		Gosched()
  1927  		mp = acquirem()
  1928  	}
  1929  	gcDebugMarkDone.restartedDueTo27993 = false
  1930  	releasem(mp)
  1931  }
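        // These hooks target regression tests for the mark-done restart path tracked
        // by issue 27993. A plausible (editorial) sequence, with runtime.GC standing
        // in for whatever workload a real test would run:
        //
        //	runtime.SetSpinInGCMarkDone(true)
        //	runtime.GC() // widen the window in which mark done can be contested
        //	runtime.SetSpinInGCMarkDone(false)
        //	if runtime.GCMarkDoneRestarted() {
        //		runtime.GCMarkDoneResetRestartFlag() // clear the flag for the next test
        //	}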
  1932  
  1933  type BitCursor struct {
  1934  	b bitCursor
  1935  }
  1936  
  1937  func NewBitCursor(buf *byte) BitCursor {
  1938  	return BitCursor{b: bitCursor{ptr: buf, n: 0}}
  1939  }
  1940  
  1941  func (b BitCursor) Write(data *byte, cnt uintptr) {
  1942  	b.b.write(data, cnt)
  1943  }
  1944  func (b BitCursor) Offset(cnt uintptr) BitCursor {
  1945  	return BitCursor{b: b.b.offset(cnt)}
  1946  }
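        // Editorial sketch: Write is assumed to copy cnt bits from data into the
        // buffer at the cursor's current bit offset, with Offset advancing that
        // offset; the byte values and bit counts are arbitrary.
        //
        //	var dst [4]byte
        //	src := byte(0b10110101)
        //	c := runtime.NewBitCursor(&dst[0])
        //	c.Write(&src, 3)           // copy the low 3 bits of src
        //	c.Offset(3).Write(&src, 5) // copy 5 more bits starting at bit 3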
  1947  
  1948  const (
  1949  	BubbleAssocUnbubbled     = bubbleAssocUnbubbled
  1950  	BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
  1951  	BubbleAssocOtherBubble   = bubbleAssocOtherBubble
  1952  )
  1953  
  1954  type TraceStackTable traceStackTable
  1955  
  1956  func (t *TraceStackTable) Reset() {
  1957  	t.tab.reset()
  1958  }
  1959  
  1960  func TraceStack(gp *G, tab *TraceStackTable) {
  1961  	traceStack(0, gp, (*traceStackTable)(tab))
  1962  }
  1963  

View as plain text