Source file src/runtime/malloc_generated.go

// Code generated by mkmalloc.go; DO NOT EDIT.

package runtime

import (
	"internal/goarch"
	"internal/runtime/sys"
	"unsafe"
)

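// Each mallocgcSmallScanNoHeaderSC<N> below is the same small-object
// allocation fast path, stamped out once per span size class with
// sizeclass and elemsize baked in as constants so the compiler can fold
// away all of the size arithmetic and dead branches.
//
// A sketch of how a caller would presumably pick a specialization
// (hypothetical; the real dispatch is emitted by mkmalloc.go):
//
//	switch sizeclass {
//	case 1:
//		return mallocgcSmallScanNoHeaderSC1(size, typ, needzero)
//	case 2:
//		return mallocgcSmallScanNoHeaderSC2(size, typ, needzero)
//	// ...and so on through SC13.
//	}
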
// mallocgcSmallScanNoHeaderSC1 allocates an 8-byte (size class 1)
// pointer-bearing object with no malloc header. It is the generated
// specialization of the small-object scan allocation path, with sizeclass
// and elemsize folded to constants; zeroing is driven by span.needzero
// rather than the needzero parameter.
func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	// Charge the current goroutine for this allocation up front if a GC
	// cycle is in progress.
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 1
	const elemsize = 8

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0) // scan span class: noscan bit clear
	span := c.alloc[spc]

	// Fast path: pop the next free slot out of the span's 64-bit allocation
	// bitmap cache. This is the inlined equivalent of nextFreeFast, with
	// elemsize folded to a constant.
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			// If freeidx is a multiple of 64 (and not the end of the span),
			// allocCache would need refilling; leave that to the slow path.
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*8 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		// Slow path: refill the allocation cache, possibly acquiring a
		// fresh span. checkGCTrigger is set if the heap grew.
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// initHeapBits already set the pointer bits for the 8-byte size
		// class on 64-bit platforms, so only scan work needs accounting.
		c.scanAlloc += 8
	} else {
		// Inlined heapSetTypeNoHeader/writeHeapBitsSmall: record typ's
		// pointer mask in the span's heap bitmap.
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		// The object is small, so a single load covers the whole GC mask.
		src0 := readUintptr(getGCMask(typ))

		const elemsize = 8

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			// Pointer-word elements: every word of the allocation is a pointer.
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			// Repeat the type's bitmap when the allocation holds several
			// elements (e.g. a small array or slice backing store).
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		// Locate the object's bits: o is x's word offset within the span,
		// i the bitmap word, and j the bit within that word.
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			// The object's bits straddle two bitmap words; split the write.
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			// The object's bits fit within a single bitmap word.
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	// Make the zeroed memory and heap bits above visible before the caller
	// can make x observable to the garbage collector.
	publicationBarrier()

	if writeBarrier.enabled {
		// Allocate black during the GC mark phase.
		gcmarknewobject(span, uintptr(x))
	} else {
		// Outside the mark phase, just track the allocation frontier;
		// objects allocated before the next mark phase count as black.
		span.freeIndexForScan = span.freeindex
	}

	// Heap profiling: nextSample counts down the bytes remaining until the
	// next sampled allocation.
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	// A fresh span may have been allocated on the slow path; see whether
	// the heap has grown enough to start a GC cycle.
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	// Now that the slot size is known, charge the assist queue for the
	// internal fragmentation: deductAssistCredit covered only size bytes.
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

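// Worked example of the single-word heap-bits write above, using the
// 16-byte class that follows (64-bit platform, so ptrBits = 64 and
// bits = elemsize/goarch.PtrSize = 2). Allocating a struct{ p *int; n int }
// yields the GC mask src = 0b01: only the first word holds a pointer. For
// the third object in a span, o = 4, so i = 0 and j = 4, and the
// single-word path clears the two mask bits at positions 4..5 of the first
// bitmap word and ORs in src<<4. Because bits is a power of two dividing
// ptrBits, j is always a multiple of bits, so j+bits never crosses a word
// boundary and the power-of-two classes never take the straddling path.
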
// mallocgcSmallScanNoHeaderSC2 is the 16-byte (size class 2) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 2
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		// sizeclass is the constant 2 here, so the compiler discards this
		// branch and always takes the else arm below.
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 16

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

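// Worked example of the two-word ("straddling") heap-bits write for the
// 24-byte class that follows: bits = 3 is not a power of two, so an
// object's three mask bits can cross a 64-bit bitmap word. For object
// index 21, o = 63, giving i = 0 and j = 63; then bits0 = 1 and bits1 = 2,
// so the low bit of src lands at bit 63 of bitmap word 0 and the remaining
// two bits land at bits 0..1 of word 1.
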
// mallocgcSmallScanNoHeaderSC3 is the 24-byte (size class 3) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 3
	const elemsize = 24

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 24

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC4 is the 32-byte (size class 4) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 4
	const elemsize = 32

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 32

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

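// The GC assist accounting in these functions is split in two:
// deductAssistCredit at the top charges the goroutine for the requested
// size, and the gcAssistBytes adjustment near the bottom charges the
// elemsize-size slack once the allocation has succeeded. For example, a
// 33-byte request served by the 48-byte class that follows pays 33 bytes
// up front and 15 bytes of internal fragmentation afterward.
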
// mallocgcSmallScanNoHeaderSC5 is the 48-byte (size class 5) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 5
	const elemsize = 48

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 48

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC6 is the 64-byte (size class 6) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 6
	const elemsize = 64

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 64

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC7 is the 80-byte (size class 7) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 7
	const elemsize = 80

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 80

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC8 is the 96-byte (size class 8) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 8
	const elemsize = 96

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 96

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC9 is the 112-byte (size class 9) instantiation
// of the same allocation fast path; see mallocgcSmallScanNoHeaderSC1 for
// line-by-line commentary.
func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 9
	const elemsize = 112

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 112

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC10 is the 128-byte (size class 10)
// instantiation of the same allocation fast path; see
// mallocgcSmallScanNoHeaderSC1 for line-by-line commentary.
func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 10
	const elemsize = 128

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 128

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

// mallocgcSmallScanNoHeaderSC11 is the 144-byte (size class 11)
// instantiation of the same allocation fast path; see
// mallocgcSmallScanNoHeaderSC1 for line-by-line commentary.
func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 11
	const elemsize = 144

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 144

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize

		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

  1749  func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  1750  	if doubleCheckMalloc {
  1751  		if gcphase == _GCmarktermination {
  1752  			throw("mallocgc called with gcphase == _GCmarktermination")
  1753  		}
  1754  	}
  1755  
  1756  	lockRankMayQueueFinalizer()
  1757  
  1758  	if debug.malloc {
  1759  		if x := preMallocgcDebug(size, typ); x != nil {
  1760  			return x
  1761  		}
  1762  	}
  1763  
  1764  	if gcBlackenEnabled != 0 {
  1765  		deductAssistCredit(size)
  1766  	}
  1767  
  1768  	const sizeclass = 12
  1769  
  1770  	const elemsize = 160
  1771  
  1772  	mp := acquirem()
  1773  	if doubleCheckMalloc {
  1774  		doubleCheckSmallScanNoHeader(size, typ, mp)
  1775  	}
  1776  	mp.mallocing = 1
  1777  
  1778  	checkGCTrigger := false
  1779  	c := getMCache(mp)
  1780  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  1781  	span := c.alloc[spc]
  1782  
  1783  	var nextFreeFastResult gclinkptr
  1784  	if span.allocCache != 0 {
  1785  		theBit := sys.TrailingZeros64(span.allocCache)
  1786  		result := span.freeindex + uint16(theBit)
  1787  		if result < span.nelems {
  1788  			freeidx := result + 1
  1789  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  1790  				span.allocCache >>= uint(theBit + 1)
  1791  				span.freeindex = freeidx
  1792  				span.allocCount++
  1793  				nextFreeFastResult = gclinkptr(uintptr(result)*
  1794  					160 +
  1795  					span.base())
  1796  			}
  1797  		}
  1798  	}
  1799  	v := nextFreeFastResult
  1800  	if v == 0 {
  1801  		v, span, checkGCTrigger = c.nextFree(spc)
  1802  	}
  1803  	x := unsafe.Pointer(v)
  1804  	if span.needzero != 0 {
  1805  		memclrNoHeapPointers(x, elemsize)
  1806  	}
  1807  	if goarch.PtrSize == 8 && sizeclass == 1 {
  1808  
  1809  		c.scanAlloc += 8
  1810  	} else {
  1811  		dataSize := size
  1812  		x := uintptr(x)
  1813  
  1814  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) {
  1815  			throw("tried to write heap bits, but no heap bits in span")
  1816  		}
  1817  
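         		// Read the first word of the type's GC mask (its pointer/scalar bitmap).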
  1818  		src0 := readUintptr(getGCMask(typ))
  1819  
  1820  		const elemsize = 160
  1821  
  1822  		scanSize := typ.PtrBytes
  1823  		src := src0
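         		// Build the object's bitmap: for a pointer-sized type every word
         		// is a pointer; otherwise replicate the element's mask once per
         		// element of the backing array.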
  1824  		if typ.Size_ == goarch.PtrSize {
  1825  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  1826  		} else {
  1827  
  1828  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  1829  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  1830  			}
  1831  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  1832  				src |= src0 << (i / goarch.PtrSize)
  1833  				scanSize += typ.Size_
  1834  			}
  1835  		}
  1836  
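         		// Locate this object's bits in the span's heap bitmap: word index
         		// i, bit offset j within that word.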
  1837  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  1838  		dst := unsafe.Pointer(dstBase)
  1839  		o := (x - span.base()) / goarch.PtrSize
  1840  		i := o / ptrBits
  1841  		j := o % ptrBits
  1842  		const bits uintptr = elemsize / goarch.PtrSize
  1843  
  1844  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  1845  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  1846  
  1847  			bits0 := ptrBits - j
  1848  			bits1 := bits - bits0
  1849  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  1850  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  1851  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  1852  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  1853  		} else {
  1854  
  1855  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  1856  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  1857  		}
  1858  
  1859  		const doubleCheck = false
  1860  		if doubleCheck {
  1861  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  1862  		}
  1863  		if doubleCheckHeapSetType {
  1864  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  1865  		}
  1866  		c.scanAlloc += scanSize
  1867  	}
  1868  
  1869  	publicationBarrier()
  1870  
  1871  	if writeBarrier.enabled {
  1872  
  1873  		gcmarknewobject(span, uintptr(x))
  1874  	} else {
  1875  
  1876  		span.freeIndexForScan = span.freeindex
  1877  	}
  1878  
  1879  	c.nextSample -= int64(elemsize)
  1880  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1881  		profilealloc(mp, x, elemsize)
  1882  	}
  1883  	mp.mallocing = 0
  1884  	releasem(mp)
  1885  
  1886  	if checkGCTrigger {
  1887  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1888  			gcStart(t)
  1889  		}
  1890  	}
  1891  	if valgrindenabled {
  1892  		valgrindMalloc(x, size)
  1893  	}
  1894  
  1895  	if gcBlackenEnabled != 0 && elemsize != 0 {
  1896  		if assistG := getg().m.curg; assistG != nil {
  1897  			assistG.gcAssistBytes -= int64(elemsize - size)
  1898  		}
  1899  	}
  1900  
  1901  	if debug.malloc {
  1902  		postMallocgcDebug(x, elemsize, typ)
  1903  	}
  1904  	return x
  1905  }
  1906  
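         // mallocgcSmallScanNoHeaderSC13 allocates scannable, headerless objects
         // in size class 13 (176-byte slots). It and the remaining SC* variants
         // below are instances of the template annotated in SC12 above.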
  1907  func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  1908  	if doubleCheckMalloc {
  1909  		if gcphase == _GCmarktermination {
  1910  			throw("mallocgc called with gcphase == _GCmarktermination")
  1911  		}
  1912  	}
  1913  
  1914  	lockRankMayQueueFinalizer()
  1915  
  1916  	if debug.malloc {
  1917  		if x := preMallocgcDebug(size, typ); x != nil {
  1918  			return x
  1919  		}
  1920  	}
  1921  
  1922  	if gcBlackenEnabled != 0 {
  1923  		deductAssistCredit(size)
  1924  	}
  1925  
  1926  	const sizeclass = 13
  1927  
  1928  	const elemsize = 176
  1929  
  1930  	mp := acquirem()
  1931  	if doubleCheckMalloc {
  1932  		doubleCheckSmallScanNoHeader(size, typ, mp)
  1933  	}
  1934  	mp.mallocing = 1
  1935  
  1936  	checkGCTrigger := false
  1937  	c := getMCache(mp)
  1938  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  1939  	span := c.alloc[spc]
  1940  
  1941  	var nextFreeFastResult gclinkptr
  1942  	if span.allocCache != 0 {
  1943  		theBit := sys.TrailingZeros64(span.allocCache)
  1944  		result := span.freeindex + uint16(theBit)
  1945  		if result < span.nelems {
  1946  			freeidx := result + 1
  1947  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  1948  				span.allocCache >>= uint(theBit + 1)
  1949  				span.freeindex = freeidx
  1950  				span.allocCount++
  1951  				nextFreeFastResult = gclinkptr(uintptr(result)*
  1952  					176 +
  1953  					span.base())
  1954  			}
  1955  		}
  1956  	}
  1957  	v := nextFreeFastResult
  1958  	if v == 0 {
  1959  		v, span, checkGCTrigger = c.nextFree(spc)
  1960  	}
  1961  	x := unsafe.Pointer(v)
  1962  	if span.needzero != 0 {
  1963  		memclrNoHeapPointers(x, elemsize)
  1964  	}
  1965  	if goarch.PtrSize == 8 && sizeclass == 1 {
  1966  
  1967  		c.scanAlloc += 8
  1968  	} else {
  1969  		dataSize := size
  1970  		x := uintptr(x)
  1971  
  1972  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) {
  1973  			throw("tried to write heap bits, but no heap bits in span")
  1974  		}
  1975  
  1976  		src0 := readUintptr(getGCMask(typ))
  1977  
  1978  		const elemsize = 176
  1979  
  1980  		scanSize := typ.PtrBytes
  1981  		src := src0
  1982  		if typ.Size_ == goarch.PtrSize {
  1983  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  1984  		} else {
  1985  
  1986  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  1987  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  1988  			}
  1989  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  1990  				src |= src0 << (i / goarch.PtrSize)
  1991  				scanSize += typ.Size_
  1992  			}
  1993  		}
  1994  
  1995  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  1996  		dst := unsafe.Pointer(dstBase)
  1997  		o := (x - span.base()) / goarch.PtrSize
  1998  		i := o / ptrBits
  1999  		j := o % ptrBits
  2000  		const bits uintptr = elemsize / goarch.PtrSize
  2001  
  2002  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2003  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2004  
  2005  			bits0 := ptrBits - j
  2006  			bits1 := bits - bits0
  2007  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2008  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2009  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2010  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2011  		} else {
  2012  
  2013  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2014  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2015  		}
  2016  
  2017  		const doubleCheck = false
  2018  		if doubleCheck {
  2019  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2020  		}
  2021  		if doubleCheckHeapSetType {
  2022  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2023  		}
  2024  		c.scanAlloc += scanSize
  2025  	}
  2026  
  2027  	publicationBarrier()
  2028  
  2029  	if writeBarrier.enabled {
  2030  
  2031  		gcmarknewobject(span, uintptr(x))
  2032  	} else {
  2033  
  2034  		span.freeIndexForScan = span.freeindex
  2035  	}
  2036  
  2037  	c.nextSample -= int64(elemsize)
  2038  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2039  		profilealloc(mp, x, elemsize)
  2040  	}
  2041  	mp.mallocing = 0
  2042  	releasem(mp)
  2043  
  2044  	if checkGCTrigger {
  2045  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2046  			gcStart(t)
  2047  		}
  2048  	}
  2049  	if valgrindenabled {
  2050  		valgrindMalloc(x, size)
  2051  	}
  2052  
  2053  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2054  		if assistG := getg().m.curg; assistG != nil {
  2055  			assistG.gcAssistBytes -= int64(elemsize - size)
  2056  		}
  2057  	}
  2058  
  2059  	if debug.malloc {
  2060  		postMallocgcDebug(x, elemsize, typ)
  2061  	}
  2062  	return x
  2063  }
  2064  
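         // mallocgcSmallScanNoHeaderSC14 allocates scannable, headerless objects in size class 14 (192-byte slots).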
  2065  func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2066  	if doubleCheckMalloc {
  2067  		if gcphase == _GCmarktermination {
  2068  			throw("mallocgc called with gcphase == _GCmarktermination")
  2069  		}
  2070  	}
  2071  
  2072  	lockRankMayQueueFinalizer()
  2073  
  2074  	if debug.malloc {
  2075  		if x := preMallocgcDebug(size, typ); x != nil {
  2076  			return x
  2077  		}
  2078  	}
  2079  
  2080  	if gcBlackenEnabled != 0 {
  2081  		deductAssistCredit(size)
  2082  	}
  2083  
  2084  	const sizeclass = 14
  2085  
  2086  	const elemsize = 192
  2087  
  2088  	mp := acquirem()
  2089  	if doubleCheckMalloc {
  2090  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2091  	}
  2092  	mp.mallocing = 1
  2093  
  2094  	checkGCTrigger := false
  2095  	c := getMCache(mp)
  2096  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2097  	span := c.alloc[spc]
  2098  
  2099  	var nextFreeFastResult gclinkptr
  2100  	if span.allocCache != 0 {
  2101  		theBit := sys.TrailingZeros64(span.allocCache)
  2102  		result := span.freeindex + uint16(theBit)
  2103  		if result < span.nelems {
  2104  			freeidx := result + 1
  2105  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2106  				span.allocCache >>= uint(theBit + 1)
  2107  				span.freeindex = freeidx
  2108  				span.allocCount++
  2109  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2110  					192 +
  2111  					span.base())
  2112  			}
  2113  		}
  2114  	}
  2115  	v := nextFreeFastResult
  2116  	if v == 0 {
  2117  		v, span, checkGCTrigger = c.nextFree(spc)
  2118  	}
  2119  	x := unsafe.Pointer(v)
  2120  	if span.needzero != 0 {
  2121  		memclrNoHeapPointers(x, elemsize)
  2122  	}
  2123  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2124  
  2125  		c.scanAlloc += 8
  2126  	} else {
  2127  		dataSize := size
  2128  		x := uintptr(x)
  2129  
  2130  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) {
  2131  			throw("tried to write heap bits, but no heap bits in span")
  2132  		}
  2133  
  2134  		src0 := readUintptr(getGCMask(typ))
  2135  
  2136  		const elemsize = 192
  2137  
  2138  		scanSize := typ.PtrBytes
  2139  		src := src0
  2140  		if typ.Size_ == goarch.PtrSize {
  2141  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2142  		} else {
  2143  
  2144  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2145  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2146  			}
  2147  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2148  				src |= src0 << (i / goarch.PtrSize)
  2149  				scanSize += typ.Size_
  2150  			}
  2151  		}
  2152  
  2153  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2154  		dst := unsafe.Pointer(dstBase)
  2155  		o := (x - span.base()) / goarch.PtrSize
  2156  		i := o / ptrBits
  2157  		j := o % ptrBits
  2158  		const bits uintptr = elemsize / goarch.PtrSize
  2159  
  2160  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2161  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2162  
  2163  			bits0 := ptrBits - j
  2164  			bits1 := bits - bits0
  2165  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2166  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2167  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2168  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2169  		} else {
  2170  
  2171  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2172  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2173  		}
  2174  
  2175  		const doubleCheck = false
  2176  		if doubleCheck {
  2177  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2178  		}
  2179  		if doubleCheckHeapSetType {
  2180  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2181  		}
  2182  		c.scanAlloc += scanSize
  2183  	}
  2184  
  2185  	publicationBarrier()
  2186  
  2187  	if writeBarrier.enabled {
  2188  
  2189  		gcmarknewobject(span, uintptr(x))
  2190  	} else {
  2191  
  2192  		span.freeIndexForScan = span.freeindex
  2193  	}
  2194  
  2195  	c.nextSample -= int64(elemsize)
  2196  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2197  		profilealloc(mp, x, elemsize)
  2198  	}
  2199  	mp.mallocing = 0
  2200  	releasem(mp)
  2201  
  2202  	if checkGCTrigger {
  2203  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2204  			gcStart(t)
  2205  		}
  2206  	}
  2207  	if valgrindenabled {
  2208  		valgrindMalloc(x, size)
  2209  	}
  2210  
  2211  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2212  		if assistG := getg().m.curg; assistG != nil {
  2213  			assistG.gcAssistBytes -= int64(elemsize - size)
  2214  		}
  2215  	}
  2216  
  2217  	if debug.malloc {
  2218  		postMallocgcDebug(x, elemsize, typ)
  2219  	}
  2220  	return x
  2221  }
  2222  
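         // mallocgcSmallScanNoHeaderSC15 allocates scannable, headerless objects in size class 15 (208-byte slots).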
  2223  func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2224  	if doubleCheckMalloc {
  2225  		if gcphase == _GCmarktermination {
  2226  			throw("mallocgc called with gcphase == _GCmarktermination")
  2227  		}
  2228  	}
  2229  
  2230  	lockRankMayQueueFinalizer()
  2231  
  2232  	if debug.malloc {
  2233  		if x := preMallocgcDebug(size, typ); x != nil {
  2234  			return x
  2235  		}
  2236  	}
  2237  
  2238  	if gcBlackenEnabled != 0 {
  2239  		deductAssistCredit(size)
  2240  	}
  2241  
  2242  	const sizeclass = 15
  2243  
  2244  	const elemsize = 208
  2245  
  2246  	mp := acquirem()
  2247  	if doubleCheckMalloc {
  2248  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2249  	}
  2250  	mp.mallocing = 1
  2251  
  2252  	checkGCTrigger := false
  2253  	c := getMCache(mp)
  2254  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2255  	span := c.alloc[spc]
  2256  
  2257  	var nextFreeFastResult gclinkptr
  2258  	if span.allocCache != 0 {
  2259  		theBit := sys.TrailingZeros64(span.allocCache)
  2260  		result := span.freeindex + uint16(theBit)
  2261  		if result < span.nelems {
  2262  			freeidx := result + 1
  2263  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2264  				span.allocCache >>= uint(theBit + 1)
  2265  				span.freeindex = freeidx
  2266  				span.allocCount++
  2267  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2268  					208 +
  2269  					span.base())
  2270  			}
  2271  		}
  2272  	}
  2273  	v := nextFreeFastResult
  2274  	if v == 0 {
  2275  		v, span, checkGCTrigger = c.nextFree(spc)
  2276  	}
  2277  	x := unsafe.Pointer(v)
  2278  	if span.needzero != 0 {
  2279  		memclrNoHeapPointers(x, elemsize)
  2280  	}
  2281  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2282  
  2283  		c.scanAlloc += 8
  2284  	} else {
  2285  		dataSize := size
  2286  		x := uintptr(x)
  2287  
  2288  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) {
  2289  			throw("tried to write heap bits, but no heap bits in span")
  2290  		}
  2291  
  2292  		src0 := readUintptr(getGCMask(typ))
  2293  
  2294  		const elemsize = 208
  2295  
  2296  		scanSize := typ.PtrBytes
  2297  		src := src0
  2298  		if typ.Size_ == goarch.PtrSize {
  2299  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2300  		} else {
  2301  
  2302  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2303  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2304  			}
  2305  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2306  				src |= src0 << (i / goarch.PtrSize)
  2307  				scanSize += typ.Size_
  2308  			}
  2309  		}
  2310  
  2311  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2312  		dst := unsafe.Pointer(dstBase)
  2313  		o := (x - span.base()) / goarch.PtrSize
  2314  		i := o / ptrBits
  2315  		j := o % ptrBits
  2316  		const bits uintptr = elemsize / goarch.PtrSize
  2317  
  2318  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2319  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2320  
  2321  			bits0 := ptrBits - j
  2322  			bits1 := bits - bits0
  2323  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2324  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2325  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2326  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2327  		} else {
  2328  
  2329  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2330  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2331  		}
  2332  
  2333  		const doubleCheck = false
  2334  		if doubleCheck {
  2335  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2336  		}
  2337  		if doubleCheckHeapSetType {
  2338  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2339  		}
  2340  		c.scanAlloc += scanSize
  2341  	}
  2342  
  2343  	publicationBarrier()
  2344  
  2345  	if writeBarrier.enabled {
  2346  
  2347  		gcmarknewobject(span, uintptr(x))
  2348  	} else {
  2349  
  2350  		span.freeIndexForScan = span.freeindex
  2351  	}
  2352  
  2353  	c.nextSample -= int64(elemsize)
  2354  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2355  		profilealloc(mp, x, elemsize)
  2356  	}
  2357  	mp.mallocing = 0
  2358  	releasem(mp)
  2359  
  2360  	if checkGCTrigger {
  2361  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2362  			gcStart(t)
  2363  		}
  2364  	}
  2365  	if valgrindenabled {
  2366  		valgrindMalloc(x, size)
  2367  	}
  2368  
  2369  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2370  		if assistG := getg().m.curg; assistG != nil {
  2371  			assistG.gcAssistBytes -= int64(elemsize - size)
  2372  		}
  2373  	}
  2374  
  2375  	if debug.malloc {
  2376  		postMallocgcDebug(x, elemsize, typ)
  2377  	}
  2378  	return x
  2379  }
  2380  
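         // mallocgcSmallScanNoHeaderSC16 allocates scannable, headerless objects in size class 16 (224-byte slots).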
  2381  func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2382  	if doubleCheckMalloc {
  2383  		if gcphase == _GCmarktermination {
  2384  			throw("mallocgc called with gcphase == _GCmarktermination")
  2385  		}
  2386  	}
  2387  
  2388  	lockRankMayQueueFinalizer()
  2389  
  2390  	if debug.malloc {
  2391  		if x := preMallocgcDebug(size, typ); x != nil {
  2392  			return x
  2393  		}
  2394  	}
  2395  
  2396  	if gcBlackenEnabled != 0 {
  2397  		deductAssistCredit(size)
  2398  	}
  2399  
  2400  	const sizeclass = 16
  2401  
  2402  	const elemsize = 224
  2403  
  2404  	mp := acquirem()
  2405  	if doubleCheckMalloc {
  2406  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2407  	}
  2408  	mp.mallocing = 1
  2409  
  2410  	checkGCTrigger := false
  2411  	c := getMCache(mp)
  2412  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2413  	span := c.alloc[spc]
  2414  
  2415  	var nextFreeFastResult gclinkptr
  2416  	if span.allocCache != 0 {
  2417  		theBit := sys.TrailingZeros64(span.allocCache)
  2418  		result := span.freeindex + uint16(theBit)
  2419  		if result < span.nelems {
  2420  			freeidx := result + 1
  2421  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2422  				span.allocCache >>= uint(theBit + 1)
  2423  				span.freeindex = freeidx
  2424  				span.allocCount++
  2425  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2426  					224 +
  2427  					span.base())
  2428  			}
  2429  		}
  2430  	}
  2431  	v := nextFreeFastResult
  2432  	if v == 0 {
  2433  		v, span, checkGCTrigger = c.nextFree(spc)
  2434  	}
  2435  	x := unsafe.Pointer(v)
  2436  	if span.needzero != 0 {
  2437  		memclrNoHeapPointers(x, elemsize)
  2438  	}
  2439  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2440  
  2441  		c.scanAlloc += 8
  2442  	} else {
  2443  		dataSize := size
  2444  		x := uintptr(x)
  2445  
  2446  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) {
  2447  			throw("tried to write heap bits, but no heap bits in span")
  2448  		}
  2449  
  2450  		src0 := readUintptr(getGCMask(typ))
  2451  
  2452  		const elemsize = 224
  2453  
  2454  		scanSize := typ.PtrBytes
  2455  		src := src0
  2456  		if typ.Size_ == goarch.PtrSize {
  2457  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2458  		} else {
  2459  
  2460  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2461  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2462  			}
  2463  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2464  				src |= src0 << (i / goarch.PtrSize)
  2465  				scanSize += typ.Size_
  2466  			}
  2467  		}
  2468  
  2469  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2470  		dst := unsafe.Pointer(dstBase)
  2471  		o := (x - span.base()) / goarch.PtrSize
  2472  		i := o / ptrBits
  2473  		j := o % ptrBits
  2474  		const bits uintptr = elemsize / goarch.PtrSize
  2475  
  2476  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2477  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2478  
  2479  			bits0 := ptrBits - j
  2480  			bits1 := bits - bits0
  2481  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2482  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2483  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2484  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2485  		} else {
  2486  
  2487  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2488  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2489  		}
  2490  
  2491  		const doubleCheck = false
  2492  		if doubleCheck {
  2493  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2494  		}
  2495  		if doubleCheckHeapSetType {
  2496  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2497  		}
  2498  		c.scanAlloc += scanSize
  2499  	}
  2500  
  2501  	publicationBarrier()
  2502  
  2503  	if writeBarrier.enabled {
  2504  
  2505  		gcmarknewobject(span, uintptr(x))
  2506  	} else {
  2507  
  2508  		span.freeIndexForScan = span.freeindex
  2509  	}
  2510  
  2511  	c.nextSample -= int64(elemsize)
  2512  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2513  		profilealloc(mp, x, elemsize)
  2514  	}
  2515  	mp.mallocing = 0
  2516  	releasem(mp)
  2517  
  2518  	if checkGCTrigger {
  2519  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2520  			gcStart(t)
  2521  		}
  2522  	}
  2523  	if valgrindenabled {
  2524  		valgrindMalloc(x, size)
  2525  	}
  2526  
  2527  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2528  		if assistG := getg().m.curg; assistG != nil {
  2529  			assistG.gcAssistBytes -= int64(elemsize - size)
  2530  		}
  2531  	}
  2532  
  2533  	if debug.malloc {
  2534  		postMallocgcDebug(x, elemsize, typ)
  2535  	}
  2536  	return x
  2537  }
  2538  
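         // mallocgcSmallScanNoHeaderSC17 allocates scannable, headerless objects in size class 17 (240-byte slots).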
  2539  func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2540  	if doubleCheckMalloc {
  2541  		if gcphase == _GCmarktermination {
  2542  			throw("mallocgc called with gcphase == _GCmarktermination")
  2543  		}
  2544  	}
  2545  
  2546  	lockRankMayQueueFinalizer()
  2547  
  2548  	if debug.malloc {
  2549  		if x := preMallocgcDebug(size, typ); x != nil {
  2550  			return x
  2551  		}
  2552  	}
  2553  
  2554  	if gcBlackenEnabled != 0 {
  2555  		deductAssistCredit(size)
  2556  	}
  2557  
  2558  	const sizeclass = 17
  2559  
  2560  	const elemsize = 240
  2561  
  2562  	mp := acquirem()
  2563  	if doubleCheckMalloc {
  2564  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2565  	}
  2566  	mp.mallocing = 1
  2567  
  2568  	checkGCTrigger := false
  2569  	c := getMCache(mp)
  2570  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2571  	span := c.alloc[spc]
  2572  
  2573  	var nextFreeFastResult gclinkptr
  2574  	if span.allocCache != 0 {
  2575  		theBit := sys.TrailingZeros64(span.allocCache)
  2576  		result := span.freeindex + uint16(theBit)
  2577  		if result < span.nelems {
  2578  			freeidx := result + 1
  2579  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2580  				span.allocCache >>= uint(theBit + 1)
  2581  				span.freeindex = freeidx
  2582  				span.allocCount++
  2583  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2584  					240 +
  2585  					span.base())
  2586  			}
  2587  		}
  2588  	}
  2589  	v := nextFreeFastResult
  2590  	if v == 0 {
  2591  		v, span, checkGCTrigger = c.nextFree(spc)
  2592  	}
  2593  	x := unsafe.Pointer(v)
  2594  	if span.needzero != 0 {
  2595  		memclrNoHeapPointers(x, elemsize)
  2596  	}
  2597  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2598  
  2599  		c.scanAlloc += 8
  2600  	} else {
  2601  		dataSize := size
  2602  		x := uintptr(x)
  2603  
  2604  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) {
  2605  			throw("tried to write heap bits, but no heap bits in span")
  2606  		}
  2607  
  2608  		src0 := readUintptr(getGCMask(typ))
  2609  
  2610  		const elemsize = 240
  2611  
  2612  		scanSize := typ.PtrBytes
  2613  		src := src0
  2614  		if typ.Size_ == goarch.PtrSize {
  2615  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2616  		} else {
  2617  
  2618  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2619  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2620  			}
  2621  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2622  				src |= src0 << (i / goarch.PtrSize)
  2623  				scanSize += typ.Size_
  2624  			}
  2625  		}
  2626  
  2627  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2628  		dst := unsafe.Pointer(dstBase)
  2629  		o := (x - span.base()) / goarch.PtrSize
  2630  		i := o / ptrBits
  2631  		j := o % ptrBits
  2632  		const bits uintptr = elemsize / goarch.PtrSize
  2633  
  2634  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2635  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2636  
  2637  			bits0 := ptrBits - j
  2638  			bits1 := bits - bits0
  2639  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2640  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2641  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2642  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2643  		} else {
  2644  
  2645  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2646  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2647  		}
  2648  
  2649  		const doubleCheck = false
  2650  		if doubleCheck {
  2651  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2652  		}
  2653  		if doubleCheckHeapSetType {
  2654  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2655  		}
  2656  		c.scanAlloc += scanSize
  2657  	}
  2658  
  2659  	publicationBarrier()
  2660  
  2661  	if writeBarrier.enabled {
  2662  
  2663  		gcmarknewobject(span, uintptr(x))
  2664  	} else {
  2665  
  2666  		span.freeIndexForScan = span.freeindex
  2667  	}
  2668  
  2669  	c.nextSample -= int64(elemsize)
  2670  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2671  		profilealloc(mp, x, elemsize)
  2672  	}
  2673  	mp.mallocing = 0
  2674  	releasem(mp)
  2675  
  2676  	if checkGCTrigger {
  2677  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2678  			gcStart(t)
  2679  		}
  2680  	}
  2681  	if valgrindenabled {
  2682  		valgrindMalloc(x, size)
  2683  	}
  2684  
  2685  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2686  		if assistG := getg().m.curg; assistG != nil {
  2687  			assistG.gcAssistBytes -= int64(elemsize - size)
  2688  		}
  2689  	}
  2690  
  2691  	if debug.malloc {
  2692  		postMallocgcDebug(x, elemsize, typ)
  2693  	}
  2694  	return x
  2695  }
  2696  
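         // mallocgcSmallScanNoHeaderSC18 allocates scannable, headerless objects in size class 18 (256-byte slots).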
  2697  func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2698  	if doubleCheckMalloc {
  2699  		if gcphase == _GCmarktermination {
  2700  			throw("mallocgc called with gcphase == _GCmarktermination")
  2701  		}
  2702  	}
  2703  
  2704  	lockRankMayQueueFinalizer()
  2705  
  2706  	if debug.malloc {
  2707  		if x := preMallocgcDebug(size, typ); x != nil {
  2708  			return x
  2709  		}
  2710  	}
  2711  
  2712  	if gcBlackenEnabled != 0 {
  2713  		deductAssistCredit(size)
  2714  	}
  2715  
  2716  	const sizeclass = 18
  2717  
  2718  	const elemsize = 256
  2719  
  2720  	mp := acquirem()
  2721  	if doubleCheckMalloc {
  2722  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2723  	}
  2724  	mp.mallocing = 1
  2725  
  2726  	checkGCTrigger := false
  2727  	c := getMCache(mp)
  2728  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2729  	span := c.alloc[spc]
  2730  
  2731  	var nextFreeFastResult gclinkptr
  2732  	if span.allocCache != 0 {
  2733  		theBit := sys.TrailingZeros64(span.allocCache)
  2734  		result := span.freeindex + uint16(theBit)
  2735  		if result < span.nelems {
  2736  			freeidx := result + 1
  2737  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2738  				span.allocCache >>= uint(theBit + 1)
  2739  				span.freeindex = freeidx
  2740  				span.allocCount++
  2741  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2742  					256 +
  2743  					span.base())
  2744  			}
  2745  		}
  2746  	}
  2747  	v := nextFreeFastResult
  2748  	if v == 0 {
  2749  		v, span, checkGCTrigger = c.nextFree(spc)
  2750  	}
  2751  	x := unsafe.Pointer(v)
  2752  	if span.needzero != 0 {
  2753  		memclrNoHeapPointers(x, elemsize)
  2754  	}
  2755  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2756  
  2757  		c.scanAlloc += 8
  2758  	} else {
  2759  		dataSize := size
  2760  		x := uintptr(x)
  2761  
  2762  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) {
  2763  			throw("tried to write heap bits, but no heap bits in span")
  2764  		}
  2765  
  2766  		src0 := readUintptr(getGCMask(typ))
  2767  
  2768  		const elemsize = 256
  2769  
  2770  		scanSize := typ.PtrBytes
  2771  		src := src0
  2772  		if typ.Size_ == goarch.PtrSize {
  2773  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2774  		} else {
  2775  
  2776  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2777  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2778  			}
  2779  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2780  				src |= src0 << (i / goarch.PtrSize)
  2781  				scanSize += typ.Size_
  2782  			}
  2783  		}
  2784  
  2785  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2786  		dst := unsafe.Pointer(dstBase)
  2787  		o := (x - span.base()) / goarch.PtrSize
  2788  		i := o / ptrBits
  2789  		j := o % ptrBits
  2790  		const bits uintptr = elemsize / goarch.PtrSize
  2791  
  2792  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2793  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2794  
  2795  			bits0 := ptrBits - j
  2796  			bits1 := bits - bits0
  2797  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2798  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2799  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2800  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2801  		} else {
  2802  
  2803  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2804  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2805  		}
  2806  
  2807  		const doubleCheck = false
  2808  		if doubleCheck {
  2809  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2810  		}
  2811  		if doubleCheckHeapSetType {
  2812  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2813  		}
  2814  		c.scanAlloc += scanSize
  2815  	}
  2816  
  2817  	publicationBarrier()
  2818  
  2819  	if writeBarrier.enabled {
  2820  
  2821  		gcmarknewobject(span, uintptr(x))
  2822  	} else {
  2823  
  2824  		span.freeIndexForScan = span.freeindex
  2825  	}
  2826  
  2827  	c.nextSample -= int64(elemsize)
  2828  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2829  		profilealloc(mp, x, elemsize)
  2830  	}
  2831  	mp.mallocing = 0
  2832  	releasem(mp)
  2833  
  2834  	if checkGCTrigger {
  2835  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2836  			gcStart(t)
  2837  		}
  2838  	}
  2839  	if valgrindenabled {
  2840  		valgrindMalloc(x, size)
  2841  	}
  2842  
  2843  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2844  		if assistG := getg().m.curg; assistG != nil {
  2845  			assistG.gcAssistBytes -= int64(elemsize - size)
  2846  		}
  2847  	}
  2848  
  2849  	if debug.malloc {
  2850  		postMallocgcDebug(x, elemsize, typ)
  2851  	}
  2852  	return x
  2853  }
  2854  
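         // mallocgcSmallScanNoHeaderSC19 allocates scannable, headerless objects in size class 19 (288-byte slots).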
  2855  func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2856  	if doubleCheckMalloc {
  2857  		if gcphase == _GCmarktermination {
  2858  			throw("mallocgc called with gcphase == _GCmarktermination")
  2859  		}
  2860  	}
  2861  
  2862  	lockRankMayQueueFinalizer()
  2863  
  2864  	if debug.malloc {
  2865  		if x := preMallocgcDebug(size, typ); x != nil {
  2866  			return x
  2867  		}
  2868  	}
  2869  
  2870  	if gcBlackenEnabled != 0 {
  2871  		deductAssistCredit(size)
  2872  	}
  2873  
  2874  	const sizeclass = 19
  2875  
  2876  	const elemsize = 288
  2877  
  2878  	mp := acquirem()
  2879  	if doubleCheckMalloc {
  2880  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2881  	}
  2882  	mp.mallocing = 1
  2883  
  2884  	checkGCTrigger := false
  2885  	c := getMCache(mp)
  2886  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2887  	span := c.alloc[spc]
  2888  
  2889  	var nextFreeFastResult gclinkptr
  2890  	if span.allocCache != 0 {
  2891  		theBit := sys.TrailingZeros64(span.allocCache)
  2892  		result := span.freeindex + uint16(theBit)
  2893  		if result < span.nelems {
  2894  			freeidx := result + 1
  2895  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2896  				span.allocCache >>= uint(theBit + 1)
  2897  				span.freeindex = freeidx
  2898  				span.allocCount++
  2899  				nextFreeFastResult = gclinkptr(uintptr(result)*
  2900  					288 +
  2901  					span.base())
  2902  			}
  2903  		}
  2904  	}
  2905  	v := nextFreeFastResult
  2906  	if v == 0 {
  2907  		v, span, checkGCTrigger = c.nextFree(spc)
  2908  	}
  2909  	x := unsafe.Pointer(v)
  2910  	if span.needzero != 0 {
  2911  		memclrNoHeapPointers(x, elemsize)
  2912  	}
  2913  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2914  
  2915  		c.scanAlloc += 8
  2916  	} else {
  2917  		dataSize := size
  2918  		x := uintptr(x)
  2919  
  2920  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) {
  2921  			throw("tried to write heap bits, but no heap bits in span")
  2922  		}
  2923  
  2924  		src0 := readUintptr(getGCMask(typ))
  2925  
  2926  		const elemsize = 288
  2927  
  2928  		scanSize := typ.PtrBytes
  2929  		src := src0
  2930  		if typ.Size_ == goarch.PtrSize {
  2931  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2932  		} else {
  2933  
  2934  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2935  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2936  			}
  2937  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2938  				src |= src0 << (i / goarch.PtrSize)
  2939  				scanSize += typ.Size_
  2940  			}
  2941  		}
  2942  
  2943  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2944  		dst := unsafe.Pointer(dstBase)
  2945  		o := (x - span.base()) / goarch.PtrSize
  2946  		i := o / ptrBits
  2947  		j := o % ptrBits
  2948  		const bits uintptr = elemsize / goarch.PtrSize
  2949  
  2950  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2951  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2952  
  2953  			bits0 := ptrBits - j
  2954  			bits1 := bits - bits0
  2955  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2956  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2957  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2958  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2959  		} else {
  2960  
  2961  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2962  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2963  		}
  2964  
  2965  		const doubleCheck = false
  2966  		if doubleCheck {
  2967  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2968  		}
  2969  		if doubleCheckHeapSetType {
  2970  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2971  		}
  2972  		c.scanAlloc += scanSize
  2973  	}
  2974  
  2975  	publicationBarrier()
  2976  
  2977  	if writeBarrier.enabled {
  2978  
  2979  		gcmarknewobject(span, uintptr(x))
  2980  	} else {
  2981  
  2982  		span.freeIndexForScan = span.freeindex
  2983  	}
  2984  
  2985  	c.nextSample -= int64(elemsize)
  2986  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2987  		profilealloc(mp, x, elemsize)
  2988  	}
  2989  	mp.mallocing = 0
  2990  	releasem(mp)
  2991  
  2992  	if checkGCTrigger {
  2993  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2994  			gcStart(t)
  2995  		}
  2996  	}
  2997  	if valgrindenabled {
  2998  		valgrindMalloc(x, size)
  2999  	}
  3000  
  3001  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3002  		if assistG := getg().m.curg; assistG != nil {
  3003  			assistG.gcAssistBytes -= int64(elemsize - size)
  3004  		}
  3005  	}
  3006  
  3007  	if debug.malloc {
  3008  		postMallocgcDebug(x, elemsize, typ)
  3009  	}
  3010  	return x
  3011  }
  3012  
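         // mallocgcSmallScanNoHeaderSC20 allocates scannable, headerless objects in size class 20 (320-byte slots).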
  3013  func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3014  	if doubleCheckMalloc {
  3015  		if gcphase == _GCmarktermination {
  3016  			throw("mallocgc called with gcphase == _GCmarktermination")
  3017  		}
  3018  	}
  3019  
  3020  	lockRankMayQueueFinalizer()
  3021  
  3022  	if debug.malloc {
  3023  		if x := preMallocgcDebug(size, typ); x != nil {
  3024  			return x
  3025  		}
  3026  	}
  3027  
  3028  	if gcBlackenEnabled != 0 {
  3029  		deductAssistCredit(size)
  3030  	}
  3031  
  3032  	const sizeclass = 20
  3033  
  3034  	const elemsize = 320
  3035  
  3036  	mp := acquirem()
  3037  	if doubleCheckMalloc {
  3038  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3039  	}
  3040  	mp.mallocing = 1
  3041  
  3042  	checkGCTrigger := false
  3043  	c := getMCache(mp)
  3044  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3045  	span := c.alloc[spc]
  3046  
  3047  	var nextFreeFastResult gclinkptr
  3048  	if span.allocCache != 0 {
  3049  		theBit := sys.TrailingZeros64(span.allocCache)
  3050  		result := span.freeindex + uint16(theBit)
  3051  		if result < span.nelems {
  3052  			freeidx := result + 1
  3053  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3054  				span.allocCache >>= uint(theBit + 1)
  3055  				span.freeindex = freeidx
  3056  				span.allocCount++
  3057  				nextFreeFastResult = gclinkptr(uintptr(result)*
  3058  					320 +
  3059  					span.base())
  3060  			}
  3061  		}
  3062  	}
  3063  	v := nextFreeFastResult
  3064  	if v == 0 {
  3065  		v, span, checkGCTrigger = c.nextFree(spc)
  3066  	}
  3067  	x := unsafe.Pointer(v)
  3068  	if span.needzero != 0 {
  3069  		memclrNoHeapPointers(x, elemsize)
  3070  	}
  3071  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3072  
  3073  		c.scanAlloc += 8
  3074  	} else {
  3075  		dataSize := size
  3076  		x := uintptr(x)
  3077  
  3078  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) {
  3079  			throw("tried to write heap bits, but no heap bits in span")
  3080  		}
  3081  
  3082  		src0 := readUintptr(getGCMask(typ))
  3083  
  3084  		const elemsize = 320
  3085  
  3086  		scanSize := typ.PtrBytes
  3087  		src := src0
  3088  		if typ.Size_ == goarch.PtrSize {
  3089  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3090  		} else {
  3091  
  3092  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3093  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3094  			}
  3095  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3096  				src |= src0 << (i / goarch.PtrSize)
  3097  				scanSize += typ.Size_
  3098  			}
  3099  		}
  3100  
  3101  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3102  		dst := unsafe.Pointer(dstBase)
  3103  		o := (x - span.base()) / goarch.PtrSize
  3104  		i := o / ptrBits
  3105  		j := o % ptrBits
  3106  		const bits uintptr = elemsize / goarch.PtrSize
  3107  
  3108  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3109  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3110  
  3111  			bits0 := ptrBits - j
  3112  			bits1 := bits - bits0
  3113  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3114  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3115  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3116  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3117  		} else {
  3118  
  3119  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3120  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3121  		}
  3122  
  3123  		const doubleCheck = false
  3124  		if doubleCheck {
  3125  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3126  		}
  3127  		if doubleCheckHeapSetType {
  3128  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3129  		}
  3130  		c.scanAlloc += scanSize
  3131  	}
  3132  
  3133  	publicationBarrier()
  3134  
  3135  	if writeBarrier.enabled {
  3136  
  3137  		gcmarknewobject(span, uintptr(x))
  3138  	} else {
  3139  
  3140  		span.freeIndexForScan = span.freeindex
  3141  	}
  3142  
  3143  	c.nextSample -= int64(elemsize)
  3144  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3145  		profilealloc(mp, x, elemsize)
  3146  	}
  3147  	mp.mallocing = 0
  3148  	releasem(mp)
  3149  
  3150  	if checkGCTrigger {
  3151  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3152  			gcStart(t)
  3153  		}
  3154  	}
  3155  	if valgrindenabled {
  3156  		valgrindMalloc(x, size)
  3157  	}
  3158  
  3159  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3160  		if assistG := getg().m.curg; assistG != nil {
  3161  			assistG.gcAssistBytes -= int64(elemsize - size)
  3162  		}
  3163  	}
  3164  
  3165  	if debug.malloc {
  3166  		postMallocgcDebug(x, elemsize, typ)
  3167  	}
  3168  	return x
  3169  }
  3170  
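         // mallocgcSmallScanNoHeaderSC21 allocates scannable, headerless objects in size class 21 (352-byte slots).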
  3171  func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3172  	if doubleCheckMalloc {
  3173  		if gcphase == _GCmarktermination {
  3174  			throw("mallocgc called with gcphase == _GCmarktermination")
  3175  		}
  3176  	}
  3177  
  3178  	lockRankMayQueueFinalizer()
  3179  
  3180  	if debug.malloc {
  3181  		if x := preMallocgcDebug(size, typ); x != nil {
  3182  			return x
  3183  		}
  3184  	}
  3185  
  3186  	if gcBlackenEnabled != 0 {
  3187  		deductAssistCredit(size)
  3188  	}
  3189  
  3190  	const sizeclass = 21
  3191  
  3192  	const elemsize = 352
  3193  
  3194  	mp := acquirem()
  3195  	if doubleCheckMalloc {
  3196  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3197  	}
  3198  	mp.mallocing = 1
  3199  
  3200  	checkGCTrigger := false
  3201  	c := getMCache(mp)
  3202  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3203  	span := c.alloc[spc]
  3204  
  3205  	var nextFreeFastResult gclinkptr
  3206  	if span.allocCache != 0 {
  3207  		theBit := sys.TrailingZeros64(span.allocCache)
  3208  		result := span.freeindex + uint16(theBit)
  3209  		if result < span.nelems {
  3210  			freeidx := result + 1
  3211  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3212  				span.allocCache >>= uint(theBit + 1)
  3213  				span.freeindex = freeidx
  3214  				span.allocCount++
  3215  				nextFreeFastResult = gclinkptr(uintptr(result)*
  3216  					352 +
  3217  					span.base())
  3218  			}
  3219  		}
  3220  	}
  3221  	v := nextFreeFastResult
  3222  	if v == 0 {
  3223  		v, span, checkGCTrigger = c.nextFree(spc)
  3224  	}
  3225  	x := unsafe.Pointer(v)
  3226  	if span.needzero != 0 {
  3227  		memclrNoHeapPointers(x, elemsize)
  3228  	}
  3229  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3230  
  3231  		c.scanAlloc += 8
  3232  	} else {
  3233  		dataSize := size
  3234  		x := uintptr(x)
  3235  
  3236  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) {
  3237  			throw("tried to write heap bits, but no heap bits in span")
  3238  		}
  3239  
  3240  		src0 := readUintptr(getGCMask(typ))
  3241  
  3242  		const elemsize = 352
  3243  
  3244  		scanSize := typ.PtrBytes
  3245  		src := src0
  3246  		if typ.Size_ == goarch.PtrSize {
  3247  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3248  		} else {
  3249  
  3250  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3251  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3252  			}
  3253  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3254  				src |= src0 << (i / goarch.PtrSize)
  3255  				scanSize += typ.Size_
  3256  			}
  3257  		}
  3258  
  3259  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3260  		dst := unsafe.Pointer(dstBase)
  3261  		o := (x - span.base()) / goarch.PtrSize
  3262  		i := o / ptrBits
  3263  		j := o % ptrBits
  3264  		const bits uintptr = elemsize / goarch.PtrSize
  3265  
  3266  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3267  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3268  
  3269  			bits0 := ptrBits - j
  3270  			bits1 := bits - bits0
  3271  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3272  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3273  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3274  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3275  		} else {
  3276  
  3277  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3278  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3279  		}
  3280  
  3281  		const doubleCheck = false
  3282  		if doubleCheck {
  3283  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3284  		}
  3285  		if doubleCheckHeapSetType {
  3286  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3287  		}
  3288  		c.scanAlloc += scanSize
  3289  	}
  3290  
  3291  	publicationBarrier()
  3292  
  3293  	if writeBarrier.enabled {
  3294  
  3295  		gcmarknewobject(span, uintptr(x))
  3296  	} else {
  3297  
  3298  		span.freeIndexForScan = span.freeindex
  3299  	}
  3300  
  3301  	c.nextSample -= int64(elemsize)
  3302  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3303  		profilealloc(mp, x, elemsize)
  3304  	}
  3305  	mp.mallocing = 0
  3306  	releasem(mp)
  3307  
  3308  	if checkGCTrigger {
  3309  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3310  			gcStart(t)
  3311  		}
  3312  	}
  3313  	if valgrindenabled {
  3314  		valgrindMalloc(x, size)
  3315  	}
  3316  
  3317  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3318  		if assistG := getg().m.curg; assistG != nil {
  3319  			assistG.gcAssistBytes -= int64(elemsize - size)
  3320  		}
  3321  	}
  3322  
  3323  	if debug.malloc {
  3324  		postMallocgcDebug(x, elemsize, typ)
  3325  	}
  3326  	return x
  3327  }
  3328  
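         // mallocgcSmallScanNoHeaderSC22 allocates scannable, headerless objects in size class 22 (384-byte slots).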
  3329  func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3330  	if doubleCheckMalloc {
  3331  		if gcphase == _GCmarktermination {
  3332  			throw("mallocgc called with gcphase == _GCmarktermination")
  3333  		}
  3334  	}
  3335  
  3336  	lockRankMayQueueFinalizer()
  3337  
  3338  	if debug.malloc {
  3339  		if x := preMallocgcDebug(size, typ); x != nil {
  3340  			return x
  3341  		}
  3342  	}
  3343  
  3344  	if gcBlackenEnabled != 0 {
  3345  		deductAssistCredit(size)
  3346  	}
  3347  
  3348  	const sizeclass = 22
  3349  
  3350  	const elemsize = 384
  3351  
  3352  	mp := acquirem()
  3353  	if doubleCheckMalloc {
  3354  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3355  	}
  3356  	mp.mallocing = 1
  3357  
  3358  	checkGCTrigger := false
  3359  	c := getMCache(mp)
  3360  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3361  	span := c.alloc[spc]
  3362  
  3363  	var nextFreeFastResult gclinkptr
  3364  	if span.allocCache != 0 {
  3365  		theBit := sys.TrailingZeros64(span.allocCache)
  3366  		result := span.freeindex + uint16(theBit)
  3367  		if result < span.nelems {
  3368  			freeidx := result + 1
  3369  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3370  				span.allocCache >>= uint(theBit + 1)
  3371  				span.freeindex = freeidx
  3372  				span.allocCount++
  3373  				nextFreeFastResult = gclinkptr(uintptr(result)*
  3374  					384 +
  3375  					span.base())
  3376  			}
  3377  		}
  3378  	}
  3379  	v := nextFreeFastResult
  3380  	if v == 0 {
  3381  		v, span, checkGCTrigger = c.nextFree(spc)
  3382  	}
  3383  	x := unsafe.Pointer(v)
  3384  	if span.needzero != 0 {
  3385  		memclrNoHeapPointers(x, elemsize)
  3386  	}
  3387  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3388  
  3389  		c.scanAlloc += 8
  3390  	} else {
  3391  		dataSize := size
  3392  		x := uintptr(x)
  3393  
  3394  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) {
  3395  			throw("tried to write heap bits, but no heap bits in span")
  3396  		}
  3397  
  3398  		src0 := readUintptr(getGCMask(typ))
  3399  
  3400  		const elemsize = 384
  3401  
  3402  		scanSize := typ.PtrBytes
  3403  		src := src0
  3404  		if typ.Size_ == goarch.PtrSize {
  3405  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3406  		} else {
  3407  
  3408  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3409  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3410  			}
  3411  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3412  				src |= src0 << (i / goarch.PtrSize)
  3413  				scanSize += typ.Size_
  3414  			}
  3415  		}
  3416  
  3417  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3418  		dst := unsafe.Pointer(dstBase)
  3419  		o := (x - span.base()) / goarch.PtrSize
  3420  		i := o / ptrBits
  3421  		j := o % ptrBits
  3422  		const bits uintptr = elemsize / goarch.PtrSize
  3423  
  3424  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3425  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3426  
  3427  			bits0 := ptrBits - j
  3428  			bits1 := bits - bits0
  3429  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3430  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3431  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3432  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3433  		} else {
  3434  
  3435  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3436  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3437  		}
  3438  
  3439  		const doubleCheck = false
  3440  		if doubleCheck {
  3441  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3442  		}
  3443  		if doubleCheckHeapSetType {
  3444  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3445  		}
  3446  		c.scanAlloc += scanSize
  3447  	}
  3448  
  3449  	publicationBarrier()
  3450  
  3451  	if writeBarrier.enabled {
  3452  
  3453  		gcmarknewobject(span, uintptr(x))
  3454  	} else {
  3455  
  3456  		span.freeIndexForScan = span.freeindex
  3457  	}
  3458  
  3459  	c.nextSample -= int64(elemsize)
  3460  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3461  		profilealloc(mp, x, elemsize)
  3462  	}
  3463  	mp.mallocing = 0
  3464  	releasem(mp)
  3465  
  3466  	if checkGCTrigger {
  3467  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3468  			gcStart(t)
  3469  		}
  3470  	}
  3471  	if valgrindenabled {
  3472  		valgrindMalloc(x, size)
  3473  	}
  3474  
  3475  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3476  		if assistG := getg().m.curg; assistG != nil {
  3477  			assistG.gcAssistBytes -= int64(elemsize - size)
  3478  		}
  3479  	}
  3480  
  3481  	if debug.malloc {
  3482  		postMallocgcDebug(x, elemsize, typ)
  3483  	}
  3484  	return x
  3485  }
  3486  
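         // mallocgcSmallScanNoHeaderSC23 allocates scannable, headerless objects in size class 23 (416-byte slots).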
  3487  func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3488  	if doubleCheckMalloc {
  3489  		if gcphase == _GCmarktermination {
  3490  			throw("mallocgc called with gcphase == _GCmarktermination")
  3491  		}
  3492  	}
  3493  
  3494  	lockRankMayQueueFinalizer()
  3495  
  3496  	if debug.malloc {
  3497  		if x := preMallocgcDebug(size, typ); x != nil {
  3498  			return x
  3499  		}
  3500  	}
  3501  
  3502  	if gcBlackenEnabled != 0 {
  3503  		deductAssistCredit(size)
  3504  	}
  3505  
  3506  	const sizeclass = 23
  3507  
  3508  	const elemsize = 416
  3509  
  3510  	mp := acquirem()
  3511  	if doubleCheckMalloc {
  3512  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3513  	}
  3514  	mp.mallocing = 1
  3515  
  3516  	checkGCTrigger := false
  3517  	c := getMCache(mp)
  3518  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3519  	span := c.alloc[spc]
  3520  
  3521  	var nextFreeFastResult gclinkptr
  3522  	if span.allocCache != 0 {
  3523  		theBit := sys.TrailingZeros64(span.allocCache)
  3524  		result := span.freeindex + uint16(theBit)
  3525  		if result < span.nelems {
  3526  			freeidx := result + 1
  3527  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3528  				span.allocCache >>= uint(theBit + 1)
  3529  				span.freeindex = freeidx
  3530  				span.allocCount++
  3531  				nextFreeFastResult = gclinkptr(uintptr(result)*
  3532  					416 +
  3533  					span.base())
  3534  			}
  3535  		}
  3536  	}
  3537  	v := nextFreeFastResult
  3538  	if v == 0 {
  3539  		v, span, checkGCTrigger = c.nextFree(spc)
  3540  	}
  3541  	x := unsafe.Pointer(v)
  3542  	if span.needzero != 0 {
  3543  		memclrNoHeapPointers(x, elemsize)
  3544  	}
  3545  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3546  
  3547  		c.scanAlloc += 8
  3548  	} else {
  3549  		dataSize := size
  3550  		x := uintptr(x)
  3551  
  3552  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) {
  3553  			throw("tried to write heap bits, but no heap bits in span")
  3554  		}
  3555  
  3556  		src0 := readUintptr(getGCMask(typ))
  3557  
  3558  		const elemsize = 416
  3559  
  3560  		scanSize := typ.PtrBytes
  3561  		src := src0
  3562  		if typ.Size_ == goarch.PtrSize {
  3563  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3564  		} else {
  3565  
  3566  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3567  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3568  			}
  3569  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3570  				src |= src0 << (i / goarch.PtrSize)
  3571  				scanSize += typ.Size_
  3572  			}
  3573  		}
  3574  
  3575  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3576  		dst := unsafe.Pointer(dstBase)
  3577  		o := (x - span.base()) / goarch.PtrSize
  3578  		i := o / ptrBits
  3579  		j := o % ptrBits
  3580  		const bits uintptr = elemsize / goarch.PtrSize
  3581  
  3582  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3583  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3584  
  3585  			bits0 := ptrBits - j
  3586  			bits1 := bits - bits0
  3587  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3588  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3589  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3590  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3591  		} else {
  3592  
  3593  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3594  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3595  		}
  3596  
  3597  		const doubleCheck = false
  3598  		if doubleCheck {
  3599  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3600  		}
  3601  		if doubleCheckHeapSetType {
  3602  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3603  		}
  3604  		c.scanAlloc += scanSize
  3605  	}
  3606  
  3607  	publicationBarrier()
  3608  
  3609  	if writeBarrier.enabled {
  3610  		// Allocate black during GC: mark the new object immediately.
  3611  		gcmarknewobject(span, uintptr(x))
  3612  	} else {
  3613  		// Track the last free index before the mark phase for the GC's conservative scanner.
  3614  		span.freeIndexForScan = span.freeindex
  3615  	}
  3616  
  3617  	c.nextSample -= int64(elemsize)
  3618  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3619  		profilealloc(mp, x, elemsize)
  3620  	}
  3621  	mp.mallocing = 0
  3622  	releasem(mp)
  3623  
  3624  	if checkGCTrigger {
  3625  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3626  			gcStart(t)
  3627  		}
  3628  	}
  3629  	if valgrindenabled {
  3630  		valgrindMalloc(x, size)
  3631  	}
  3632  
  3633  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3634  		if assistG := getg().m.curg; assistG != nil {
  3635  			assistG.gcAssistBytes -= int64(elemsize - size)
  3636  		}
  3637  	}
  3638  
  3639  	if debug.malloc {
  3640  		postMallocgcDebug(x, elemsize, typ)
  3641  	}
  3642  	return x
  3643  }
  3644  
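        // mallocgcSmallScanNoHeaderSC24 is the specialized allocation path for
        // size class 24 (448-byte) objects that contain pointers but carry no
        // malloc header: their pointer bitmap is written directly into the
        // span's heap bits. Dispatch to this specialization is assumed to
        // happen elsewhere in the runtime; this file only defines the bodies.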
  3645  func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3646  	if doubleCheckMalloc {
  3647  		if gcphase == _GCmarktermination {
  3648  			throw("mallocgc called with gcphase == _GCmarktermination")
  3649  		}
  3650  	}
  3651  
  3652  	lockRankMayQueueFinalizer()
  3653  
  3654  	if debug.malloc {
  3655  		if x := preMallocgcDebug(size, typ); x != nil {
  3656  			return x
  3657  		}
  3658  	}
  3659  
  3660  	if gcBlackenEnabled != 0 {
  3661  		deductAssistCredit(size)
  3662  	}
  3663  
  3664  	const sizeclass = 24
  3665  
  3666  	const elemsize = 448
  3667  
  3668  	mp := acquirem()
  3669  	if doubleCheckMalloc {
  3670  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3671  	}
  3672  	mp.mallocing = 1
  3673  
  3674  	checkGCTrigger := false
  3675  	c := getMCache(mp)
  3676  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3677  	span := c.alloc[spc]
  3678  
  3679  	var nextFreeFastResult gclinkptr
  3680  	if span.allocCache != 0 {
  3681  		theBit := sys.TrailingZeros64(span.allocCache)
  3682  		result := span.freeindex + uint16(theBit)
  3683  		if result < span.nelems {
  3684  			freeidx := result + 1
  3685  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3686  				span.allocCache >>= uint(theBit + 1)
  3687  				span.freeindex = freeidx
  3688  				span.allocCount++
  3689  				nextFreeFastResult = gclinkptr(uintptr(result)*448 + span.base())
  3692  			}
  3693  		}
  3694  	}
  3695  	v := nextFreeFastResult
  3696  	if v == 0 {
  3697  		v, span, checkGCTrigger = c.nextFree(spc)
  3698  	}
  3699  	x := unsafe.Pointer(v)
  3700  	if span.needzero != 0 {
  3701  		memclrNoHeapPointers(x, elemsize)
  3702  	}
  3703  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3704  		// initHeapBits already set the pointer bits for the 8-byte size class on 64-bit; this branch is constant-false in this specialization.
  3705  		c.scanAlloc += 8
  3706  	} else {
  3707  		dataSize := size
  3708  		x := uintptr(x)
  3709  
  3710  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) {
  3711  			throw("tried to write heap bits, but no heap bits in span")
  3712  		}
  3713  
  3714  		src0 := readUintptr(getGCMask(typ))
  3715  
  3716  		const elemsize = 448
  3717  
  3718  		scanSize := typ.PtrBytes
  3719  		src := src0
  3720  		if typ.Size_ == goarch.PtrSize {
  3721  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3722  		} else {
  3723  			// Replicate the type's pointer mask across each element packed into the allocation.
  3724  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3725  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3726  			}
  3727  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3728  				src |= src0 << (i / goarch.PtrSize)
  3729  				scanSize += typ.Size_
  3730  			}
  3731  		}
  3732  
  3733  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3734  		dst := unsafe.Pointer(dstBase)
  3735  		o := (x - span.base()) / goarch.PtrSize
  3736  		i := o / ptrBits
  3737  		j := o % ptrBits
  3738  		const bits uintptr = elemsize / goarch.PtrSize
  3739  
  3740  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3741  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3742  			// The object's pointer mask straddles two heap bitmap words; split it across them.
  3743  			bits0 := ptrBits - j
  3744  			bits1 := bits - bits0
  3745  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3746  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3747  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3748  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3749  		} else {
  3750  			// The object's pointer mask fits in a single heap bitmap word.
  3751  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3752  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3753  		}
  3754  
  3755  		const doubleCheck = false
  3756  		if doubleCheck {
  3757  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3758  		}
  3759  		if doubleCheckHeapSetType {
  3760  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3761  		}
  3762  		c.scanAlloc += scanSize
  3763  	}
  3764  
  3765  	publicationBarrier()
  3766  
  3767  	if writeBarrier.enabled {
  3768  		// Allocate black during GC: mark the new object immediately.
  3769  		gcmarknewobject(span, uintptr(x))
  3770  	} else {
  3771  		// Track the last free index before the mark phase for the GC's conservative scanner.
  3772  		span.freeIndexForScan = span.freeindex
  3773  	}
  3774  
  3775  	c.nextSample -= int64(elemsize)
  3776  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3777  		profilealloc(mp, x, elemsize)
  3778  	}
  3779  	mp.mallocing = 0
  3780  	releasem(mp)
  3781  
  3782  	if checkGCTrigger {
  3783  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3784  			gcStart(t)
  3785  		}
  3786  	}
  3787  	if valgrindenabled {
  3788  		valgrindMalloc(x, size)
  3789  	}
  3790  
  3791  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3792  		if assistG := getg().m.curg; assistG != nil {
  3793  			assistG.gcAssistBytes -= int64(elemsize - size)
  3794  		}
  3795  	}
  3796  
  3797  	if debug.malloc {
  3798  		postMallocgcDebug(x, elemsize, typ)
  3799  	}
  3800  	return x
  3801  }
  3802  
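        // mallocgcSmallScanNoHeaderSC25 is the size class 25 (480-byte)
        // instance of the specialized small, pointer-bearing, headerless
        // allocation path.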
  3803  func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3804  	if doubleCheckMalloc {
  3805  		if gcphase == _GCmarktermination {
  3806  			throw("mallocgc called with gcphase == _GCmarktermination")
  3807  		}
  3808  	}
  3809  
  3810  	lockRankMayQueueFinalizer()
  3811  
  3812  	if debug.malloc {
  3813  		if x := preMallocgcDebug(size, typ); x != nil {
  3814  			return x
  3815  		}
  3816  	}
  3817  
  3818  	if gcBlackenEnabled != 0 {
  3819  		deductAssistCredit(size)
  3820  	}
  3821  
  3822  	const sizeclass = 25
  3823  
  3824  	const elemsize = 480
  3825  
  3826  	mp := acquirem()
  3827  	if doubleCheckMalloc {
  3828  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3829  	}
  3830  	mp.mallocing = 1
  3831  
  3832  	checkGCTrigger := false
  3833  	c := getMCache(mp)
  3834  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3835  	span := c.alloc[spc]
  3836  
  3837  	var nextFreeFastResult gclinkptr
  3838  	if span.allocCache != 0 {
  3839  		theBit := sys.TrailingZeros64(span.allocCache)
  3840  		result := span.freeindex + uint16(theBit)
  3841  		if result < span.nelems {
  3842  			freeidx := result + 1
  3843  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3844  				span.allocCache >>= uint(theBit + 1)
  3845  				span.freeindex = freeidx
  3846  				span.allocCount++
  3847  				nextFreeFastResult = gclinkptr(uintptr(result)*480 + span.base())
  3850  			}
  3851  		}
  3852  	}
  3853  	v := nextFreeFastResult
  3854  	if v == 0 {
  3855  		v, span, checkGCTrigger = c.nextFree(spc)
  3856  	}
  3857  	x := unsafe.Pointer(v)
  3858  	if span.needzero != 0 {
  3859  		memclrNoHeapPointers(x, elemsize)
  3860  	}
  3861  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3862  		// initHeapBits already set the pointer bits for the 8-byte size class on 64-bit; this branch is constant-false in this specialization.
  3863  		c.scanAlloc += 8
  3864  	} else {
  3865  		dataSize := size
  3866  		x := uintptr(x)
  3867  
  3868  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) {
  3869  			throw("tried to write heap bits, but no heap bits in span")
  3870  		}
  3871  
  3872  		src0 := readUintptr(getGCMask(typ))
  3873  
  3874  		const elemsize = 480
  3875  
  3876  		scanSize := typ.PtrBytes
  3877  		src := src0
  3878  		if typ.Size_ == goarch.PtrSize {
  3879  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3880  		} else {
  3881  			// Replicate the type's pointer mask across each element packed into the allocation.
  3882  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3883  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3884  			}
  3885  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3886  				src |= src0 << (i / goarch.PtrSize)
  3887  				scanSize += typ.Size_
  3888  			}
  3889  		}
  3890  
  3891  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3892  		dst := unsafe.Pointer(dstBase)
  3893  		o := (x - span.base()) / goarch.PtrSize
  3894  		i := o / ptrBits
  3895  		j := o % ptrBits
  3896  		const bits uintptr = elemsize / goarch.PtrSize
  3897  
  3898  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3899  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3900  			// The object's pointer mask straddles two heap bitmap words; split it across them.
  3901  			bits0 := ptrBits - j
  3902  			bits1 := bits - bits0
  3903  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3904  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3905  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3906  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3907  		} else {
  3908  			// The object's pointer mask fits in a single heap bitmap word.
  3909  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3910  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3911  		}
  3912  
  3913  		const doubleCheck = false
  3914  		if doubleCheck {
  3915  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3916  		}
  3917  		if doubleCheckHeapSetType {
  3918  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3919  		}
  3920  		c.scanAlloc += scanSize
  3921  	}
  3922  
  3923  	publicationBarrier()
  3924  
  3925  	if writeBarrier.enabled {
  3926  		// Allocate black during GC: mark the new object immediately.
  3927  		gcmarknewobject(span, uintptr(x))
  3928  	} else {
  3929  		// Track the last free index before the mark phase for the GC's conservative scanner.
  3930  		span.freeIndexForScan = span.freeindex
  3931  	}
  3932  
  3933  	c.nextSample -= int64(elemsize)
  3934  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3935  		profilealloc(mp, x, elemsize)
  3936  	}
  3937  	mp.mallocing = 0
  3938  	releasem(mp)
  3939  
  3940  	if checkGCTrigger {
  3941  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3942  			gcStart(t)
  3943  		}
  3944  	}
  3945  	if valgrindenabled {
  3946  		valgrindMalloc(x, size)
  3947  	}
  3948  
  3949  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3950  		if assistG := getg().m.curg; assistG != nil {
  3951  			assistG.gcAssistBytes -= int64(elemsize - size)
  3952  		}
  3953  	}
  3954  
  3955  	if debug.malloc {
  3956  		postMallocgcDebug(x, elemsize, typ)
  3957  	}
  3958  	return x
  3959  }
  3960  
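        // mallocgcSmallScanNoHeaderSC26 is the size class 26 (512-byte)
        // instance of the specialized small, pointer-bearing, headerless
        // allocation path. On 64-bit platforms, 512 bytes is assumed to be
        // the largest size whose pointer bitmap still fits in the span's heap
        // bits (minSizeForMallocHeader); larger objects get a malloc header.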
  3961  func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3962  	if doubleCheckMalloc {
  3963  		if gcphase == _GCmarktermination {
  3964  			throw("mallocgc called with gcphase == _GCmarktermination")
  3965  		}
  3966  	}
  3967  
  3968  	lockRankMayQueueFinalizer()
  3969  
  3970  	if debug.malloc {
  3971  		if x := preMallocgcDebug(size, typ); x != nil {
  3972  			return x
  3973  		}
  3974  	}
  3975  
  3976  	if gcBlackenEnabled != 0 {
  3977  		deductAssistCredit(size)
  3978  	}
  3979  
  3980  	const sizeclass = 26
  3981  
  3982  	const elemsize = 512
  3983  
  3984  	mp := acquirem()
  3985  	if doubleCheckMalloc {
  3986  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3987  	}
  3988  	mp.mallocing = 1
  3989  
  3990  	checkGCTrigger := false
  3991  	c := getMCache(mp)
  3992  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3993  	span := c.alloc[spc]
  3994  
  3995  	var nextFreeFastResult gclinkptr
  3996  	if span.allocCache != 0 {
  3997  		theBit := sys.TrailingZeros64(span.allocCache)
  3998  		result := span.freeindex + uint16(theBit)
  3999  		if result < span.nelems {
  4000  			freeidx := result + 1
  4001  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  4002  				span.allocCache >>= uint(theBit + 1)
  4003  				span.freeindex = freeidx
  4004  				span.allocCount++
  4005  				nextFreeFastResult = gclinkptr(uintptr(result)*512 + span.base())
  4008  			}
  4009  		}
  4010  	}
  4011  	v := nextFreeFastResult
  4012  	if v == 0 {
  4013  		v, span, checkGCTrigger = c.nextFree(spc)
  4014  	}
  4015  	x := unsafe.Pointer(v)
  4016  	if span.needzero != 0 {
  4017  		memclrNoHeapPointers(x, elemsize)
  4018  	}
  4019  	if goarch.PtrSize == 8 && sizeclass == 1 {
  4020  		// initHeapBits already set the pointer bits for the 8-byte size class on 64-bit; this branch is constant-false in this specialization.
  4021  		c.scanAlloc += 8
  4022  	} else {
  4023  		dataSize := size
  4024  		x := uintptr(x)
  4025  
  4026  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) {
  4027  			throw("tried to write heap bits, but no heap bits in span")
  4028  		}
  4029  
  4030  		src0 := readUintptr(getGCMask(typ))
  4031  
  4032  		const elemsize = 512
  4033  
  4034  		scanSize := typ.PtrBytes
  4035  		src := src0
  4036  		if typ.Size_ == goarch.PtrSize {
  4037  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  4038  		} else {
  4039  			// Replicate the type's pointer mask across each element packed into the allocation.
  4040  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  4041  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  4042  			}
  4043  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  4044  				src |= src0 << (i / goarch.PtrSize)
  4045  				scanSize += typ.Size_
  4046  			}
  4047  		}
  4048  
  4049  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  4050  		dst := unsafe.Pointer(dstBase)
  4051  		o := (x - span.base()) / goarch.PtrSize
  4052  		i := o / ptrBits
  4053  		j := o % ptrBits
  4054  		const bits uintptr = elemsize / goarch.PtrSize
  4055  
  4056  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  4057  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  4058  			// The object's pointer mask straddles two heap bitmap words; split it across them.
  4059  			bits0 := ptrBits - j
  4060  			bits1 := bits - bits0
  4061  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  4062  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  4063  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  4064  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  4065  		} else {
  4066  			// The object's pointer mask fits in a single heap bitmap word.
  4067  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  4068  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  4069  		}
  4070  
  4071  		const doubleCheck = false
  4072  		if doubleCheck {
  4073  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  4074  		}
  4075  		if doubleCheckHeapSetType {
  4076  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  4077  		}
  4078  		c.scanAlloc += scanSize
  4079  	}
  4080  
  4081  	publicationBarrier()
  4082  
  4083  	if writeBarrier.enabled {
  4084  		// Allocate black during GC: mark the new object immediately.
  4085  		gcmarknewobject(span, uintptr(x))
  4086  	} else {
  4087  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4088  		span.freeIndexForScan = span.freeindex
  4089  	}
  4090  
  4091  	c.nextSample -= int64(elemsize)
  4092  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4093  		profilealloc(mp, x, elemsize)
  4094  	}
  4095  	mp.mallocing = 0
  4096  	releasem(mp)
  4097  
  4098  	if checkGCTrigger {
  4099  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4100  			gcStart(t)
  4101  		}
  4102  	}
  4103  	if valgrindenabled {
  4104  		valgrindMalloc(x, size)
  4105  	}
  4106  
  4107  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4108  		if assistG := getg().m.curg; assistG != nil {
  4109  			assistG.gcAssistBytes -= int64(elemsize - size)
  4110  		}
  4111  	}
  4112  
  4113  	if debug.malloc {
  4114  		postMallocgcDebug(x, elemsize, typ)
  4115  	}
  4116  	return x
  4117  }
  4118  
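        // mallocTiny1 is the specialized allocation path for pointer-free
        // objects of constant size 1. Tiny allocations are combined: the
        // mcache keeps one 16-byte block (c.tiny) and bump-allocates from it
        // at c.tinyoffset, carving a fresh block out of tinySpanClass only
        // when the object does not fit. As an illustration, an allocation
        // such as new(bool) for a noscan type is assumed to be routed here;
        // the dispatch itself lives outside this file.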
  4119  func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4120  	if doubleCheckMalloc {
  4121  		if gcphase == _GCmarktermination {
  4122  			throw("mallocgc called with gcphase == _GCmarktermination")
  4123  		}
  4124  	}
  4125  
  4126  	lockRankMayQueueFinalizer()
  4127  
  4128  	if debug.malloc {
  4129  		if x := preMallocgcDebug(size, typ); x != nil {
  4130  			return x
  4131  		}
  4132  	}
  4133  
  4134  	if gcBlackenEnabled != 0 {
  4135  		deductAssistCredit(size)
  4136  	}
  4137  
  4138  	const constsize = 1
  4139  
  4140  	const elemsize = 16
  4141  
  4142  	mp := acquirem()
  4143  	if doubleCheckMalloc {
  4144  		doubleCheckTiny(constsize, typ, mp)
  4145  	}
  4146  	mp.mallocing = 1
  4147  
  4148  	c := getMCache(mp)
  4149  	off := c.tinyoffset
  4150  
  4151  	if constsize&7 == 0 {
  4152  		off = alignUp(off, 8)
  4153  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4154  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4155  		off = alignUp(off, 8)
  4156  	} else if constsize&3 == 0 {
  4157  		off = alignUp(off, 4)
  4158  	} else if constsize&1 == 0 {
  4159  		off = alignUp(off, 2)
  4160  	}
  4161  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4162  		// The object fits into the existing tiny block.
  4163  		x := unsafe.Pointer(c.tiny + off)
  4164  		c.tinyoffset = off + constsize
  4165  		c.tinyAllocs++
  4166  		mp.mallocing = 0
  4167  		releasem(mp)
  4168  		const elemsize = 0
  4169  		{
  4170  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4171  			if valgrindenabled {
  4172  				valgrindMalloc(x, size)
  4173  			}
  4174  
  4175  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4176  				if assistG := getg().m.curg; assistG != nil {
  4177  					assistG.gcAssistBytes -= int64(elemsize - size)
  4178  				}
  4179  			}
  4180  
  4181  			if debug.malloc {
  4182  				postMallocgcDebug(x, elemsize, typ)
  4183  			}
  4184  			return x
  4185  		}
  4186  
  4187  	}
  4188  
  4189  	checkGCTrigger := false
  4190  	span := c.alloc[tinySpanClass]
  4191  
  4192  	const nbytes = 8192
  4193  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4196  	var nextFreeFastResult gclinkptr
  4197  	if span.allocCache != 0 {
  4198  		theBit := sys.TrailingZeros64(span.allocCache)
  4199  		result := span.freeindex + uint16(theBit)
  4200  		if result < nelems {
  4201  			freeidx := result + 1
  4202  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4203  				span.allocCache >>= uint(theBit + 1)
  4204  				span.freeindex = freeidx
  4205  				span.allocCount++
  4206  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4209  			}
  4210  		}
  4211  	}
  4212  	v := nextFreeFastResult
  4213  	if v == 0 {
  4214  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4215  	}
  4216  	x := unsafe.Pointer(v)
  4217  	(*[2]uint64)(x)[0] = 0
  4218  	(*[2]uint64)(x)[1] = 0
  4219  
  4220  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4221  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4222  		c.tiny = uintptr(x)
  4223  		c.tinyoffset = constsize
  4224  	}
  4225  
  4226  	publicationBarrier()
  4227  
  4228  	if writeBarrier.enabled {
  4229  		// Allocate black during GC: mark the new object immediately.
  4230  		gcmarknewobject(span, uintptr(x))
  4231  	} else {
  4232  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4233  		span.freeIndexForScan = span.freeindex
  4234  	}
  4235  
  4236  	c.nextSample -= int64(elemsize)
  4237  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4238  		profilealloc(mp, x, elemsize)
  4239  	}
  4240  	mp.mallocing = 0
  4241  	releasem(mp)
  4242  
  4243  	if checkGCTrigger {
  4244  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4245  			gcStart(t)
  4246  		}
  4247  	}
  4248  
  4249  	if raceenabled {
  4250  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  4251  		x = add(x, elemsize-constsize)
  4252  	}
  4253  	if valgrindenabled {
  4254  		valgrindMalloc(x, size)
  4255  	}
  4256  
  4257  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4258  		if assistG := getg().m.curg; assistG != nil {
  4259  			assistG.gcAssistBytes -= int64(elemsize - size)
  4260  		}
  4261  	}
  4262  
  4263  	if debug.malloc {
  4264  		postMallocgcDebug(x, elemsize, typ)
  4265  	}
  4266  	return x
  4267  }
  4268  
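        // mallocTiny2 is the tiny-allocator specialization for pointer-free
        // objects of constant size 2; see mallocTiny1 for how tiny blocks are combined.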
  4269  func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4270  	if doubleCheckMalloc {
  4271  		if gcphase == _GCmarktermination {
  4272  			throw("mallocgc called with gcphase == _GCmarktermination")
  4273  		}
  4274  	}
  4275  
  4276  	lockRankMayQueueFinalizer()
  4277  
  4278  	if debug.malloc {
  4279  		if x := preMallocgcDebug(size, typ); x != nil {
  4280  			return x
  4281  		}
  4282  	}
  4283  
  4284  	if gcBlackenEnabled != 0 {
  4285  		deductAssistCredit(size)
  4286  	}
  4287  
  4288  	const constsize = 2
  4289  
  4290  	const elemsize = 16
  4291  
  4292  	mp := acquirem()
  4293  	if doubleCheckMalloc {
  4294  		doubleCheckTiny(constsize, typ, mp)
  4295  	}
  4296  	mp.mallocing = 1
  4297  
  4298  	c := getMCache(mp)
  4299  	off := c.tinyoffset
  4300  
  4301  	if constsize&7 == 0 {
  4302  		off = alignUp(off, 8)
  4303  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4304  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4305  		off = alignUp(off, 8)
  4306  	} else if constsize&3 == 0 {
  4307  		off = alignUp(off, 4)
  4308  	} else if constsize&1 == 0 {
  4309  		off = alignUp(off, 2)
  4310  	}
  4311  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4312  		// The object fits into the existing tiny block.
  4313  		x := unsafe.Pointer(c.tiny + off)
  4314  		c.tinyoffset = off + constsize
  4315  		c.tinyAllocs++
  4316  		mp.mallocing = 0
  4317  		releasem(mp)
  4318  		const elemsize = 0
  4319  		{
  4320  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4321  			if valgrindenabled {
  4322  				valgrindMalloc(x, size)
  4323  			}
  4324  
  4325  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4326  				if assistG := getg().m.curg; assistG != nil {
  4327  					assistG.gcAssistBytes -= int64(elemsize - size)
  4328  				}
  4329  			}
  4330  
  4331  			if debug.malloc {
  4332  				postMallocgcDebug(x, elemsize, typ)
  4333  			}
  4334  			return x
  4335  		}
  4336  
  4337  	}
  4338  
  4339  	checkGCTrigger := false
  4340  	span := c.alloc[tinySpanClass]
  4341  
  4342  	const nbytes = 8192
  4343  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4346  	var nextFreeFastResult gclinkptr
  4347  	if span.allocCache != 0 {
  4348  		theBit := sys.TrailingZeros64(span.allocCache)
  4349  		result := span.freeindex + uint16(theBit)
  4350  		if result < nelems {
  4351  			freeidx := result + 1
  4352  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4353  				span.allocCache >>= uint(theBit + 1)
  4354  				span.freeindex = freeidx
  4355  				span.allocCount++
  4356  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4359  			}
  4360  		}
  4361  	}
  4362  	v := nextFreeFastResult
  4363  	if v == 0 {
  4364  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4365  	}
  4366  	x := unsafe.Pointer(v)
  4367  	(*[2]uint64)(x)[0] = 0
  4368  	(*[2]uint64)(x)[1] = 0
  4369  
  4370  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4371  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4372  		c.tiny = uintptr(x)
  4373  		c.tinyoffset = constsize
  4374  	}
  4375  
  4376  	publicationBarrier()
  4377  
  4378  	if writeBarrier.enabled {
  4379  		// Allocate black during GC: mark the new object immediately.
  4380  		gcmarknewobject(span, uintptr(x))
  4381  	} else {
  4382  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4383  		span.freeIndexForScan = span.freeindex
  4384  	}
  4385  
  4386  	c.nextSample -= int64(elemsize)
  4387  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4388  		profilealloc(mp, x, elemsize)
  4389  	}
  4390  	mp.mallocing = 0
  4391  	releasem(mp)
  4392  
  4393  	if checkGCTrigger {
  4394  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4395  			gcStart(t)
  4396  		}
  4397  	}
  4398  
  4399  	if raceenabled {
  4400  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  4401  		x = add(x, elemsize-constsize)
  4402  	}
  4403  	if valgrindenabled {
  4404  		valgrindMalloc(x, size)
  4405  	}
  4406  
  4407  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4408  		if assistG := getg().m.curg; assistG != nil {
  4409  			assistG.gcAssistBytes -= int64(elemsize - size)
  4410  		}
  4411  	}
  4412  
  4413  	if debug.malloc {
  4414  		postMallocgcDebug(x, elemsize, typ)
  4415  	}
  4416  	return x
  4417  }
  4418  
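        // mallocTiny3 is the tiny-allocator specialization for pointer-free
        // objects of constant size 3; see mallocTiny1 for how tiny blocks are combined.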
  4419  func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4420  	if doubleCheckMalloc {
  4421  		if gcphase == _GCmarktermination {
  4422  			throw("mallocgc called with gcphase == _GCmarktermination")
  4423  		}
  4424  	}
  4425  
  4426  	lockRankMayQueueFinalizer()
  4427  
  4428  	if debug.malloc {
  4429  		if x := preMallocgcDebug(size, typ); x != nil {
  4430  			return x
  4431  		}
  4432  	}
  4433  
  4434  	if gcBlackenEnabled != 0 {
  4435  		deductAssistCredit(size)
  4436  	}
  4437  
  4438  	const constsize = 3
  4439  
  4440  	const elemsize = 16
  4441  
  4442  	mp := acquirem()
  4443  	if doubleCheckMalloc {
  4444  		doubleCheckTiny(constsize, typ, mp)
  4445  	}
  4446  	mp.mallocing = 1
  4447  
  4448  	c := getMCache(mp)
  4449  	off := c.tinyoffset
  4450  
  4451  	if constsize&7 == 0 {
  4452  		off = alignUp(off, 8)
  4453  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4454  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4455  		off = alignUp(off, 8)
  4456  	} else if constsize&3 == 0 {
  4457  		off = alignUp(off, 4)
  4458  	} else if constsize&1 == 0 {
  4459  		off = alignUp(off, 2)
  4460  	}
  4461  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4462  		// The object fits into the existing tiny block.
  4463  		x := unsafe.Pointer(c.tiny + off)
  4464  		c.tinyoffset = off + constsize
  4465  		c.tinyAllocs++
  4466  		mp.mallocing = 0
  4467  		releasem(mp)
  4468  		const elemsize = 0
  4469  		{
  4470  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4471  			if valgrindenabled {
  4472  				valgrindMalloc(x, size)
  4473  			}
  4474  
  4475  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4476  				if assistG := getg().m.curg; assistG != nil {
  4477  					assistG.gcAssistBytes -= int64(elemsize - size)
  4478  				}
  4479  			}
  4480  
  4481  			if debug.malloc {
  4482  				postMallocgcDebug(x, elemsize, typ)
  4483  			}
  4484  			return x
  4485  		}
  4486  
  4487  	}
  4488  
  4489  	checkGCTrigger := false
  4490  	span := c.alloc[tinySpanClass]
  4491  
  4492  	const nbytes = 8192
  4493  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4496  	var nextFreeFastResult gclinkptr
  4497  	if span.allocCache != 0 {
  4498  		theBit := sys.TrailingZeros64(span.allocCache)
  4499  		result := span.freeindex + uint16(theBit)
  4500  		if result < nelems {
  4501  			freeidx := result + 1
  4502  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4503  				span.allocCache >>= uint(theBit + 1)
  4504  				span.freeindex = freeidx
  4505  				span.allocCount++
  4506  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4509  			}
  4510  		}
  4511  	}
  4512  	v := nextFreeFastResult
  4513  	if v == 0 {
  4514  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4515  	}
  4516  	x := unsafe.Pointer(v)
  4517  	(*[2]uint64)(x)[0] = 0
  4518  	(*[2]uint64)(x)[1] = 0
  4519  
  4520  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4521  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4522  		c.tiny = uintptr(x)
  4523  		c.tinyoffset = constsize
  4524  	}
  4525  
  4526  	publicationBarrier()
  4527  
  4528  	if writeBarrier.enabled {
  4529  		// Allocate black during GC: mark the new object immediately.
  4530  		gcmarknewobject(span, uintptr(x))
  4531  	} else {
  4532  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4533  		span.freeIndexForScan = span.freeindex
  4534  	}
  4535  
  4536  	c.nextSample -= int64(elemsize)
  4537  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4538  		profilealloc(mp, x, elemsize)
  4539  	}
  4540  	mp.mallocing = 0
  4541  	releasem(mp)
  4542  
  4543  	if checkGCTrigger {
  4544  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4545  			gcStart(t)
  4546  		}
  4547  	}
  4548  
  4549  	if raceenabled {
  4550  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  4551  		x = add(x, elemsize-constsize)
  4552  	}
  4553  	if valgrindenabled {
  4554  		valgrindMalloc(x, size)
  4555  	}
  4556  
  4557  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4558  		if assistG := getg().m.curg; assistG != nil {
  4559  			assistG.gcAssistBytes -= int64(elemsize - size)
  4560  		}
  4561  	}
  4562  
  4563  	if debug.malloc {
  4564  		postMallocgcDebug(x, elemsize, typ)
  4565  	}
  4566  	return x
  4567  }
  4568  
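        // mallocTiny4 is the tiny-allocator specialization for pointer-free
        // objects of constant size 4; see mallocTiny1 for how tiny blocks are combined.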
  4569  func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4570  	if doubleCheckMalloc {
  4571  		if gcphase == _GCmarktermination {
  4572  			throw("mallocgc called with gcphase == _GCmarktermination")
  4573  		}
  4574  	}
  4575  
  4576  	lockRankMayQueueFinalizer()
  4577  
  4578  	if debug.malloc {
  4579  		if x := preMallocgcDebug(size, typ); x != nil {
  4580  			return x
  4581  		}
  4582  	}
  4583  
  4584  	if gcBlackenEnabled != 0 {
  4585  		deductAssistCredit(size)
  4586  	}
  4587  
  4588  	const constsize = 4
  4589  
  4590  	const elemsize = 16
  4591  
  4592  	mp := acquirem()
  4593  	if doubleCheckMalloc {
  4594  		doubleCheckTiny(constsize, typ, mp)
  4595  	}
  4596  	mp.mallocing = 1
  4597  
  4598  	c := getMCache(mp)
  4599  	off := c.tinyoffset
  4600  
  4601  	if constsize&7 == 0 {
  4602  		off = alignUp(off, 8)
  4603  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4604  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4605  		off = alignUp(off, 8)
  4606  	} else if constsize&3 == 0 {
  4607  		off = alignUp(off, 4)
  4608  	} else if constsize&1 == 0 {
  4609  		off = alignUp(off, 2)
  4610  	}
  4611  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4612  		// The object fits into the existing tiny block.
  4613  		x := unsafe.Pointer(c.tiny + off)
  4614  		c.tinyoffset = off + constsize
  4615  		c.tinyAllocs++
  4616  		mp.mallocing = 0
  4617  		releasem(mp)
  4618  		const elemsize = 0
  4619  		{
  4620  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4621  			if valgrindenabled {
  4622  				valgrindMalloc(x, size)
  4623  			}
  4624  
  4625  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4626  				if assistG := getg().m.curg; assistG != nil {
  4627  					assistG.gcAssistBytes -= int64(elemsize - size)
  4628  				}
  4629  			}
  4630  
  4631  			if debug.malloc {
  4632  				postMallocgcDebug(x, elemsize, typ)
  4633  			}
  4634  			return x
  4635  		}
  4636  
  4637  	}
  4638  
  4639  	checkGCTrigger := false
  4640  	span := c.alloc[tinySpanClass]
  4641  
  4642  	const nbytes = 8192
  4643  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4646  	var nextFreeFastResult gclinkptr
  4647  	if span.allocCache != 0 {
  4648  		theBit := sys.TrailingZeros64(span.allocCache)
  4649  		result := span.freeindex + uint16(theBit)
  4650  		if result < nelems {
  4651  			freeidx := result + 1
  4652  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4653  				span.allocCache >>= uint(theBit + 1)
  4654  				span.freeindex = freeidx
  4655  				span.allocCount++
  4656  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4659  			}
  4660  		}
  4661  	}
  4662  	v := nextFreeFastResult
  4663  	if v == 0 {
  4664  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4665  	}
  4666  	x := unsafe.Pointer(v)
  4667  	(*[2]uint64)(x)[0] = 0
  4668  	(*[2]uint64)(x)[1] = 0
  4669  
  4670  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4671  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4672  		c.tiny = uintptr(x)
  4673  		c.tinyoffset = constsize
  4674  	}
  4675  
  4676  	publicationBarrier()
  4677  
  4678  	if writeBarrier.enabled {
  4679  		// Allocate black during GC: mark the new object immediately.
  4680  		gcmarknewobject(span, uintptr(x))
  4681  	} else {
  4682  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4683  		span.freeIndexForScan = span.freeindex
  4684  	}
  4685  
  4686  	c.nextSample -= int64(elemsize)
  4687  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4688  		profilealloc(mp, x, elemsize)
  4689  	}
  4690  	mp.mallocing = 0
  4691  	releasem(mp)
  4692  
  4693  	if checkGCTrigger {
  4694  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4695  			gcStart(t)
  4696  		}
  4697  	}
  4698  
  4699  	if raceenabled {
  4700  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  4701  		x = add(x, elemsize-constsize)
  4702  	}
  4703  	if valgrindenabled {
  4704  		valgrindMalloc(x, size)
  4705  	}
  4706  
  4707  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4708  		if assistG := getg().m.curg; assistG != nil {
  4709  			assistG.gcAssistBytes -= int64(elemsize - size)
  4710  		}
  4711  	}
  4712  
  4713  	if debug.malloc {
  4714  		postMallocgcDebug(x, elemsize, typ)
  4715  	}
  4716  	return x
  4717  }
  4718  
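        // mallocTiny5 is the tiny-allocator specialization for pointer-free
        // objects of constant size 5; see mallocTiny1 for how tiny blocks are combined.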
  4719  func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4720  	if doubleCheckMalloc {
  4721  		if gcphase == _GCmarktermination {
  4722  			throw("mallocgc called with gcphase == _GCmarktermination")
  4723  		}
  4724  	}
  4725  
  4726  	lockRankMayQueueFinalizer()
  4727  
  4728  	if debug.malloc {
  4729  		if x := preMallocgcDebug(size, typ); x != nil {
  4730  			return x
  4731  		}
  4732  	}
  4733  
  4734  	if gcBlackenEnabled != 0 {
  4735  		deductAssistCredit(size)
  4736  	}
  4737  
  4738  	const constsize = 5
  4739  
  4740  	const elemsize = 16
  4741  
  4742  	mp := acquirem()
  4743  	if doubleCheckMalloc {
  4744  		doubleCheckTiny(constsize, typ, mp)
  4745  	}
  4746  	mp.mallocing = 1
  4747  
  4748  	c := getMCache(mp)
  4749  	off := c.tinyoffset
  4750  
  4751  	if constsize&7 == 0 {
  4752  		off = alignUp(off, 8)
  4753  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4754  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4755  		off = alignUp(off, 8)
  4756  	} else if constsize&3 == 0 {
  4757  		off = alignUp(off, 4)
  4758  	} else if constsize&1 == 0 {
  4759  		off = alignUp(off, 2)
  4760  	}
  4761  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4762  		// The object fits into the existing tiny block.
  4763  		x := unsafe.Pointer(c.tiny + off)
  4764  		c.tinyoffset = off + constsize
  4765  		c.tinyAllocs++
  4766  		mp.mallocing = 0
  4767  		releasem(mp)
  4768  		const elemsize = 0
  4769  		{
  4770  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4771  			if valgrindenabled {
  4772  				valgrindMalloc(x, size)
  4773  			}
  4774  
  4775  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4776  				if assistG := getg().m.curg; assistG != nil {
  4777  					assistG.gcAssistBytes -= int64(elemsize - size)
  4778  				}
  4779  			}
  4780  
  4781  			if debug.malloc {
  4782  				postMallocgcDebug(x, elemsize, typ)
  4783  			}
  4784  			return x
  4785  		}
  4786  
  4787  	}
  4788  
  4789  	checkGCTrigger := false
  4790  	span := c.alloc[tinySpanClass]
  4791  
  4792  	const nbytes = 8192
  4793  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4796  	var nextFreeFastResult gclinkptr
  4797  	if span.allocCache != 0 {
  4798  		theBit := sys.TrailingZeros64(span.allocCache)
  4799  		result := span.freeindex + uint16(theBit)
  4800  		if result < nelems {
  4801  			freeidx := result + 1
  4802  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4803  				span.allocCache >>= uint(theBit + 1)
  4804  				span.freeindex = freeidx
  4805  				span.allocCount++
  4806  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4809  			}
  4810  		}
  4811  	}
  4812  	v := nextFreeFastResult
  4813  	if v == 0 {
  4814  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4815  	}
  4816  	x := unsafe.Pointer(v)
  4817  	(*[2]uint64)(x)[0] = 0
  4818  	(*[2]uint64)(x)[1] = 0
  4819  
  4820  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4821  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4822  		c.tiny = uintptr(x)
  4823  		c.tinyoffset = constsize
  4824  	}
  4825  
  4826  	publicationBarrier()
  4827  
  4828  	if writeBarrier.enabled {
  4829  		// Allocate black during GC: mark the new object immediately.
  4830  		gcmarknewobject(span, uintptr(x))
  4831  	} else {
  4832  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4833  		span.freeIndexForScan = span.freeindex
  4834  	}
  4835  
  4836  	c.nextSample -= int64(elemsize)
  4837  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4838  		profilealloc(mp, x, elemsize)
  4839  	}
  4840  	mp.mallocing = 0
  4841  	releasem(mp)
  4842  
  4843  	if checkGCTrigger {
  4844  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4845  			gcStart(t)
  4846  		}
  4847  	}
  4848  
  4849  	if raceenabled {
  4850  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  4851  		x = add(x, elemsize-constsize)
  4852  	}
  4853  	if valgrindenabled {
  4854  		valgrindMalloc(x, size)
  4855  	}
  4856  
  4857  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4858  		if assistG := getg().m.curg; assistG != nil {
  4859  			assistG.gcAssistBytes -= int64(elemsize - size)
  4860  		}
  4861  	}
  4862  
  4863  	if debug.malloc {
  4864  		postMallocgcDebug(x, elemsize, typ)
  4865  	}
  4866  	return x
  4867  }
  4868  
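        // mallocTiny6 is the tiny-allocator specialization for pointer-free
        // objects of constant size 6; see mallocTiny1 for how tiny blocks are combined.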
  4869  func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4870  	if doubleCheckMalloc {
  4871  		if gcphase == _GCmarktermination {
  4872  			throw("mallocgc called with gcphase == _GCmarktermination")
  4873  		}
  4874  	}
  4875  
  4876  	lockRankMayQueueFinalizer()
  4877  
  4878  	if debug.malloc {
  4879  		if x := preMallocgcDebug(size, typ); x != nil {
  4880  			return x
  4881  		}
  4882  	}
  4883  
  4884  	if gcBlackenEnabled != 0 {
  4885  		deductAssistCredit(size)
  4886  	}
  4887  
  4888  	const constsize = 6
  4889  
  4890  	const elemsize = 16
  4891  
  4892  	mp := acquirem()
  4893  	if doubleCheckMalloc {
  4894  		doubleCheckTiny(constsize, typ, mp)
  4895  	}
  4896  	mp.mallocing = 1
  4897  
  4898  	c := getMCache(mp)
  4899  	off := c.tinyoffset
  4900  
  4901  	if constsize&7 == 0 {
  4902  		off = alignUp(off, 8)
  4903  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4904  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  4905  		off = alignUp(off, 8)
  4906  	} else if constsize&3 == 0 {
  4907  		off = alignUp(off, 4)
  4908  	} else if constsize&1 == 0 {
  4909  		off = alignUp(off, 2)
  4910  	}
  4911  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4912  		// The object fits into the existing tiny block.
  4913  		x := unsafe.Pointer(c.tiny + off)
  4914  		c.tinyoffset = off + constsize
  4915  		c.tinyAllocs++
  4916  		mp.mallocing = 0
  4917  		releasem(mp)
  4918  		const elemsize = 0
  4919  		{
  4920  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  4921  			if valgrindenabled {
  4922  				valgrindMalloc(x, size)
  4923  			}
  4924  
  4925  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4926  				if assistG := getg().m.curg; assistG != nil {
  4927  					assistG.gcAssistBytes -= int64(elemsize - size)
  4928  				}
  4929  			}
  4930  
  4931  			if debug.malloc {
  4932  				postMallocgcDebug(x, elemsize, typ)
  4933  			}
  4934  			return x
  4935  		}
  4936  
  4937  	}
  4938  
  4939  	checkGCTrigger := false
  4940  	span := c.alloc[tinySpanClass]
  4941  
  4942  	const nbytes = 8192
  4943  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4946  	var nextFreeFastResult gclinkptr
  4947  	if span.allocCache != 0 {
  4948  		theBit := sys.TrailingZeros64(span.allocCache)
  4949  		result := span.freeindex + uint16(theBit)
  4950  		if result < nelems {
  4951  			freeidx := result + 1
  4952  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4953  				span.allocCache >>= uint(theBit + 1)
  4954  				span.freeindex = freeidx
  4955  				span.allocCount++
  4956  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4959  			}
  4960  		}
  4961  	}
  4962  	v := nextFreeFastResult
  4963  	if v == 0 {
  4964  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4965  	}
  4966  	x := unsafe.Pointer(v)
  4967  	(*[2]uint64)(x)[0] = 0
  4968  	(*[2]uint64)(x)[1] = 0
  4969  
  4970  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4971  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  4972  		c.tiny = uintptr(x)
  4973  		c.tinyoffset = constsize
  4974  	}
  4975  
  4976  	publicationBarrier()
  4977  
  4978  	if writeBarrier.enabled {
  4979  		// Allocate black during GC: mark the new object immediately.
  4980  		gcmarknewobject(span, uintptr(x))
  4981  	} else {
  4982  		// Track the last free index before the mark phase for the GC's conservative scanner.
  4983  		span.freeIndexForScan = span.freeindex
  4984  	}
  4985  
  4986  	c.nextSample -= int64(elemsize)
  4987  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4988  		profilealloc(mp, x, elemsize)
  4989  	}
  4990  	mp.mallocing = 0
  4991  	releasem(mp)
  4992  
  4993  	if checkGCTrigger {
  4994  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4995  			gcStart(t)
  4996  		}
  4997  	}
  4998  
  4999  	if raceenabled {
  5000  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  5001  		x = add(x, elemsize-constsize)
  5002  	}
  5003  	if valgrindenabled {
  5004  		valgrindMalloc(x, size)
  5005  	}
  5006  
  5007  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5008  		if assistG := getg().m.curg; assistG != nil {
  5009  			assistG.gcAssistBytes -= int64(elemsize - size)
  5010  		}
  5011  	}
  5012  
  5013  	if debug.malloc {
  5014  		postMallocgcDebug(x, elemsize, typ)
  5015  	}
  5016  	return x
  5017  }
  5018  
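        // mallocTiny7 is the tiny-allocator specialization for pointer-free
        // objects of constant size 7; see mallocTiny1 for how tiny blocks are combined.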
  5019  func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5020  	if doubleCheckMalloc {
  5021  		if gcphase == _GCmarktermination {
  5022  			throw("mallocgc called with gcphase == _GCmarktermination")
  5023  		}
  5024  	}
  5025  
  5026  	lockRankMayQueueFinalizer()
  5027  
  5028  	if debug.malloc {
  5029  		if x := preMallocgcDebug(size, typ); x != nil {
  5030  			return x
  5031  		}
  5032  	}
  5033  
  5034  	if gcBlackenEnabled != 0 {
  5035  		deductAssistCredit(size)
  5036  	}
  5037  
  5038  	const constsize = 7
  5039  
  5040  	const elemsize = 16
  5041  
  5042  	mp := acquirem()
  5043  	if doubleCheckMalloc {
  5044  		doubleCheckTiny(constsize, typ, mp)
  5045  	}
  5046  	mp.mallocing = 1
  5047  
  5048  	c := getMCache(mp)
  5049  	off := c.tinyoffset
  5050  
  5051  	if constsize&7 == 0 {
  5052  		off = alignUp(off, 8)
  5053  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5054  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  5055  		off = alignUp(off, 8)
  5056  	} else if constsize&3 == 0 {
  5057  		off = alignUp(off, 4)
  5058  	} else if constsize&1 == 0 {
  5059  		off = alignUp(off, 2)
  5060  	}
  5061  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5062  		// The object fits into the existing tiny block.
  5063  		x := unsafe.Pointer(c.tiny + off)
  5064  		c.tinyoffset = off + constsize
  5065  		c.tinyAllocs++
  5066  		mp.mallocing = 0
  5067  		releasem(mp)
  5068  		const elemsize = 0
  5069  		{
  5070  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  5071  			if valgrindenabled {
  5072  				valgrindMalloc(x, size)
  5073  			}
  5074  
  5075  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5076  				if assistG := getg().m.curg; assistG != nil {
  5077  					assistG.gcAssistBytes -= int64(elemsize - size)
  5078  				}
  5079  			}
  5080  
  5081  			if debug.malloc {
  5082  				postMallocgcDebug(x, elemsize, typ)
  5083  			}
  5084  			return x
  5085  		}
  5086  
  5087  	}
  5088  
  5089  	checkGCTrigger := false
  5090  	span := c.alloc[tinySpanClass]
  5091  
  5092  	const nbytes = 8192
  5093  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5096  	var nextFreeFastResult gclinkptr
  5097  	if span.allocCache != 0 {
  5098  		theBit := sys.TrailingZeros64(span.allocCache)
  5099  		result := span.freeindex + uint16(theBit)
  5100  		if result < nelems {
  5101  			freeidx := result + 1
  5102  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5103  				span.allocCache >>= uint(theBit + 1)
  5104  				span.freeindex = freeidx
  5105  				span.allocCount++
  5106  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5109  			}
  5110  		}
  5111  	}
  5112  	v := nextFreeFastResult
  5113  	if v == 0 {
  5114  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5115  	}
  5116  	x := unsafe.Pointer(v)
  5117  	(*[2]uint64)(x)[0] = 0
  5118  	(*[2]uint64)(x)[1] = 0
  5119  
  5120  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5121  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  5122  		c.tiny = uintptr(x)
  5123  		c.tinyoffset = constsize
  5124  	}
  5125  
  5126  	publicationBarrier()
  5127  
  5128  	if writeBarrier.enabled {
  5129  		// Allocate black during GC: mark the new object immediately.
  5130  		gcmarknewobject(span, uintptr(x))
  5131  	} else {
  5132  		// Track the last free index before the mark phase for the GC's conservative scanner.
  5133  		span.freeIndexForScan = span.freeindex
  5134  	}
  5135  
  5136  	c.nextSample -= int64(elemsize)
  5137  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5138  		profilealloc(mp, x, elemsize)
  5139  	}
  5140  	mp.mallocing = 0
  5141  	releasem(mp)
  5142  
  5143  	if checkGCTrigger {
  5144  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5145  			gcStart(t)
  5146  		}
  5147  	}
  5148  
  5149  	if raceenabled {
  5150  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  5151  		x = add(x, elemsize-constsize)
  5152  	}
  5153  	if valgrindenabled {
  5154  		valgrindMalloc(x, size)
  5155  	}
  5156  
  5157  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5158  		if assistG := getg().m.curg; assistG != nil {
  5159  			assistG.gcAssistBytes -= int64(elemsize - size)
  5160  		}
  5161  	}
  5162  
  5163  	if debug.malloc {
  5164  		postMallocgcDebug(x, elemsize, typ)
  5165  	}
  5166  	return x
  5167  }
  5168  
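        // mallocTiny8 is the tiny-allocator specialization for pointer-free
        // objects of constant size 8; see mallocTiny1 for how tiny blocks are combined.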
  5169  func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5170  	if doubleCheckMalloc {
  5171  		if gcphase == _GCmarktermination {
  5172  			throw("mallocgc called with gcphase == _GCmarktermination")
  5173  		}
  5174  	}
  5175  
  5176  	lockRankMayQueueFinalizer()
  5177  
  5178  	if debug.malloc {
  5179  		if x := preMallocgcDebug(size, typ); x != nil {
  5180  			return x
  5181  		}
  5182  	}
  5183  
  5184  	if gcBlackenEnabled != 0 {
  5185  		deductAssistCredit(size)
  5186  	}
  5187  
  5188  	const constsize = 8
  5189  
  5190  	const elemsize = 16
  5191  
  5192  	mp := acquirem()
  5193  	if doubleCheckMalloc {
  5194  		doubleCheckTiny(constsize, typ, mp)
  5195  	}
  5196  	mp.mallocing = 1
  5197  
  5198  	c := getMCache(mp)
  5199  	off := c.tinyoffset
  5200  
  5201  	if constsize&7 == 0 {
  5202  		off = alignUp(off, 8)
  5203  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5204  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  5205  		off = alignUp(off, 8)
  5206  	} else if constsize&3 == 0 {
  5207  		off = alignUp(off, 4)
  5208  	} else if constsize&1 == 0 {
  5209  		off = alignUp(off, 2)
  5210  	}
  5211  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5212  		// The object fits into the existing tiny block.
  5213  		x := unsafe.Pointer(c.tiny + off)
  5214  		c.tinyoffset = off + constsize
  5215  		c.tinyAllocs++
  5216  		mp.mallocing = 0
  5217  		releasem(mp)
  5218  		const elemsize = 0
  5219  		{
  5220  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  5221  			if valgrindenabled {
  5222  				valgrindMalloc(x, size)
  5223  			}
  5224  
  5225  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5226  				if assistG := getg().m.curg; assistG != nil {
  5227  					assistG.gcAssistBytes -= int64(elemsize - size)
  5228  				}
  5229  			}
  5230  
  5231  			if debug.malloc {
  5232  				postMallocgcDebug(x, elemsize, typ)
  5233  			}
  5234  			return x
  5235  		}
  5236  
  5237  	}
  5238  
  5239  	checkGCTrigger := false
  5240  	span := c.alloc[tinySpanClass]
  5241  
  5242  	const nbytes = 8192
  5243  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5246  	var nextFreeFastResult gclinkptr
  5247  	if span.allocCache != 0 {
  5248  		theBit := sys.TrailingZeros64(span.allocCache)
  5249  		result := span.freeindex + uint16(theBit)
  5250  		if result < nelems {
  5251  			freeidx := result + 1
  5252  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5253  				span.allocCache >>= uint(theBit + 1)
  5254  				span.freeindex = freeidx
  5255  				span.allocCount++
  5256  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5259  			}
  5260  		}
  5261  	}
  5262  	v := nextFreeFastResult
  5263  	if v == 0 {
  5264  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5265  	}
  5266  	x := unsafe.Pointer(v)
  5267  	(*[2]uint64)(x)[0] = 0
  5268  	(*[2]uint64)(x)[1] = 0
  5269  
  5270  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5271  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  5272  		c.tiny = uintptr(x)
  5273  		c.tinyoffset = constsize
  5274  	}
  5275  
  5276  	publicationBarrier()
  5277  
  5278  	if writeBarrier.enabled {
  5279  		// Allocate black during GC: mark the new object immediately.
  5280  		gcmarknewobject(span, uintptr(x))
  5281  	} else {
  5282  		// Track the last free index before the mark phase for the GC's conservative scanner.
  5283  		span.freeIndexForScan = span.freeindex
  5284  	}
  5285  
  5286  	c.nextSample -= int64(elemsize)
  5287  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5288  		profilealloc(mp, x, elemsize)
  5289  	}
  5290  	mp.mallocing = 0
  5291  	releasem(mp)
  5292  
  5293  	if checkGCTrigger {
  5294  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5295  			gcStart(t)
  5296  		}
  5297  	}
  5298  
  5299  	if raceenabled {
  5300  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  5301  		x = add(x, elemsize-constsize)
  5302  	}
  5303  	if valgrindenabled {
  5304  		valgrindMalloc(x, size)
  5305  	}
  5306  
  5307  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5308  		if assistG := getg().m.curg; assistG != nil {
  5309  			assistG.gcAssistBytes -= int64(elemsize - size)
  5310  		}
  5311  	}
  5312  
  5313  	if debug.malloc {
  5314  		postMallocgcDebug(x, elemsize, typ)
  5315  	}
  5316  	return x
  5317  }
  5318  
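        // mallocTiny9 is the tiny-allocator specialization for pointer-free
        // objects of constant size 9; see mallocTiny1 for how tiny blocks are combined.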
  5319  func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5320  	if doubleCheckMalloc {
  5321  		if gcphase == _GCmarktermination {
  5322  			throw("mallocgc called with gcphase == _GCmarktermination")
  5323  		}
  5324  	}
  5325  
  5326  	lockRankMayQueueFinalizer()
  5327  
  5328  	if debug.malloc {
  5329  		if x := preMallocgcDebug(size, typ); x != nil {
  5330  			return x
  5331  		}
  5332  	}
  5333  
  5334  	if gcBlackenEnabled != 0 {
  5335  		deductAssistCredit(size)
  5336  	}
  5337  
  5338  	const constsize = 9
  5339  
  5340  	const elemsize = 16
  5341  
  5342  	mp := acquirem()
  5343  	if doubleCheckMalloc {
  5344  		doubleCheckTiny(constsize, typ, mp)
  5345  	}
  5346  	mp.mallocing = 1
  5347  
  5348  	c := getMCache(mp)
  5349  	off := c.tinyoffset
  5350  
  5351  	if constsize&7 == 0 {
  5352  		off = alignUp(off, 8)
  5353  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5354  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  5355  		off = alignUp(off, 8)
  5356  	} else if constsize&3 == 0 {
  5357  		off = alignUp(off, 4)
  5358  	} else if constsize&1 == 0 {
  5359  		off = alignUp(off, 2)
  5360  	}
  5361  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5362  		// The object fits into the existing tiny block.
  5363  		x := unsafe.Pointer(c.tiny + off)
  5364  		c.tinyoffset = off + constsize
  5365  		c.tinyAllocs++
  5366  		mp.mallocing = 0
  5367  		releasem(mp)
  5368  		const elemsize = 0
  5369  		{
  5370  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  5371  			if valgrindenabled {
  5372  				valgrindMalloc(x, size)
  5373  			}
  5374  
  5375  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5376  				if assistG := getg().m.curg; assistG != nil {
  5377  					assistG.gcAssistBytes -= int64(elemsize - size)
  5378  				}
  5379  			}
  5380  
  5381  			if debug.malloc {
  5382  				postMallocgcDebug(x, elemsize, typ)
  5383  			}
  5384  			return x
  5385  		}
  5386  
  5387  	}
  5388  
  5389  	checkGCTrigger := false
  5390  	span := c.alloc[tinySpanClass]
  5391  
  5392  	const nbytes = 8192
  5393  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5396  	var nextFreeFastResult gclinkptr
  5397  	if span.allocCache != 0 {
  5398  		theBit := sys.TrailingZeros64(span.allocCache)
  5399  		result := span.freeindex + uint16(theBit)
  5400  		if result < nelems {
  5401  			freeidx := result + 1
  5402  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5403  				span.allocCache >>= uint(theBit + 1)
  5404  				span.freeindex = freeidx
  5405  				span.allocCount++
  5406  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5409  			}
  5410  		}
  5411  	}
  5412  	v := nextFreeFastResult
  5413  	if v == 0 {
  5414  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5415  	}
  5416  	x := unsafe.Pointer(v)
  5417  	(*[2]uint64)(x)[0] = 0
  5418  	(*[2]uint64)(x)[1] = 0
  5419  
  5420  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5421  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  5422  		c.tiny = uintptr(x)
  5423  		c.tinyoffset = constsize
  5424  	}
  5425  
  5426  	publicationBarrier()
  5427  
  5428  	if writeBarrier.enabled {
  5429  		// Allocate black during GC: mark the new object immediately.
  5430  		gcmarknewobject(span, uintptr(x))
  5431  	} else {
  5432  		// Track the last free index before the mark phase for the GC's conservative scanner.
  5433  		span.freeIndexForScan = span.freeindex
  5434  	}
  5435  
  5436  	c.nextSample -= int64(elemsize)
  5437  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5438  		profilealloc(mp, x, elemsize)
  5439  	}
  5440  	mp.mallocing = 0
  5441  	releasem(mp)
  5442  
  5443  	if checkGCTrigger {
  5444  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5445  			gcStart(t)
  5446  		}
  5447  	}
  5448  
  5449  	if raceenabled {
  5450  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  5451  		x = add(x, elemsize-constsize)
  5452  	}
  5453  	if valgrindenabled {
  5454  		valgrindMalloc(x, size)
  5455  	}
  5456  
  5457  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5458  		if assistG := getg().m.curg; assistG != nil {
  5459  			assistG.gcAssistBytes -= int64(elemsize - size)
  5460  		}
  5461  	}
  5462  
  5463  	if debug.malloc {
  5464  		postMallocgcDebug(x, elemsize, typ)
  5465  	}
  5466  	return x
  5467  }
  5468  
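        // mallocTiny10 is the tiny-allocator specialization for pointer-free
        // objects of constant size 10; see mallocTiny1 for how tiny blocks are combined.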
  5469  func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5470  	if doubleCheckMalloc {
  5471  		if gcphase == _GCmarktermination {
  5472  			throw("mallocgc called with gcphase == _GCmarktermination")
  5473  		}
  5474  	}
  5475  
  5476  	lockRankMayQueueFinalizer()
  5477  
  5478  	if debug.malloc {
  5479  		if x := preMallocgcDebug(size, typ); x != nil {
  5480  			return x
  5481  		}
  5482  	}
  5483  
  5484  	if gcBlackenEnabled != 0 {
  5485  		deductAssistCredit(size)
  5486  	}
  5487  
  5488  	const constsize = 10
  5489  
  5490  	const elemsize = 16
  5491  
  5492  	mp := acquirem()
  5493  	if doubleCheckMalloc {
  5494  		doubleCheckTiny(constsize, typ, mp)
  5495  	}
  5496  	mp.mallocing = 1
  5497  
  5498  	c := getMCache(mp)
  5499  	off := c.tinyoffset
  5500  
  5501  	if constsize&7 == 0 {
  5502  		off = alignUp(off, 8)
  5503  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5504  		// Conservatively align 12-byte objects to 8 bytes on 32-bit systems so a leading 64-bit field does not fault on atomic access.
  5505  		off = alignUp(off, 8)
  5506  	} else if constsize&3 == 0 {
  5507  		off = alignUp(off, 4)
  5508  	} else if constsize&1 == 0 {
  5509  		off = alignUp(off, 2)
  5510  	}
  5511  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5512  		// The object fits into the existing tiny block.
  5513  		x := unsafe.Pointer(c.tiny + off)
  5514  		c.tinyoffset = off + constsize
  5515  		c.tinyAllocs++
  5516  		mp.mallocing = 0
  5517  		releasem(mp)
  5518  		const elemsize = 0
  5519  		{
  5520  			// elemsize is 0 on this path: a tiny-block hit consumes no new heap memory, so no assist debt is charged below.
  5521  			if valgrindenabled {
  5522  				valgrindMalloc(x, size)
  5523  			}
  5524  
  5525  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5526  				if assistG := getg().m.curg; assistG != nil {
  5527  					assistG.gcAssistBytes -= int64(elemsize - size)
  5528  				}
  5529  			}
  5530  
  5531  			if debug.malloc {
  5532  				postMallocgcDebug(x, elemsize, typ)
  5533  			}
  5534  			return x
  5535  		}
  5536  
  5537  	}
  5538  
  5539  	checkGCTrigger := false
  5540  	span := c.alloc[tinySpanClass]
  5541  
  5542  	const nbytes = 8192
  5543  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5546  	var nextFreeFastResult gclinkptr
  5547  	if span.allocCache != 0 {
  5548  		theBit := sys.TrailingZeros64(span.allocCache)
  5549  		result := span.freeindex + uint16(theBit)
  5550  		if result < nelems {
  5551  			freeidx := result + 1
  5552  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5553  				span.allocCache >>= uint(theBit + 1)
  5554  				span.freeindex = freeidx
  5555  				span.allocCount++
  5556  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5559  			}
  5560  		}
  5561  	}
  5562  	v := nextFreeFastResult
  5563  	if v == 0 {
  5564  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5565  	}
  5566  	x := unsafe.Pointer(v)
  5567  	(*[2]uint64)(x)[0] = 0
  5568  	(*[2]uint64)(x)[1] = 0
  5569  
  5570  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5571  		// See if the new block should replace the existing tiny block, based on the remaining free space.
  5572  		c.tiny = uintptr(x)
  5573  		c.tinyoffset = constsize
  5574  	}
  5575  
  5576  	publicationBarrier()
  5577  
  5578  	if writeBarrier.enabled {
  5579  		// Allocate black during GC: mark the new object immediately.
  5580  		gcmarknewobject(span, uintptr(x))
  5581  	} else {
  5582  		// Track the last free index before the mark phase for the GC's conservative scanner.
  5583  		span.freeIndexForScan = span.freeindex
  5584  	}
  5585  
  5586  	c.nextSample -= int64(elemsize)
  5587  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5588  		profilealloc(mp, x, elemsize)
  5589  	}
  5590  	mp.mallocing = 0
  5591  	releasem(mp)
  5592  
  5593  	if checkGCTrigger {
  5594  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5595  			gcStart(t)
  5596  		}
  5597  	}
  5598  
  5599  	if raceenabled {
  5600  		// Pad tiny allocations to the end of the block so out-of-bounds arithmetic is detectable by checkptr.
  5601  		x = add(x, elemsize-constsize)
  5602  	}
  5603  	if valgrindenabled {
  5604  		valgrindMalloc(x, size)
  5605  	}
  5606  
  5607  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5608  		if assistG := getg().m.curg; assistG != nil {
  5609  			assistG.gcAssistBytes -= int64(elemsize - size)
  5610  		}
  5611  	}
  5612  
  5613  	if debug.malloc {
  5614  		postMallocgcDebug(x, elemsize, typ)
  5615  	}
  5616  	return x
  5617  }
  5618  
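        // mallocTiny11 is the mkmalloc-generated tiny-allocation path
        // specialized for 11-byte, pointer-free requests. Like the 10-byte
        // variant above, it first tries to pack the object into the mcache's
        // current 16-byte tiny block and falls back to claiming a fresh element
        // from the tiny span class; see the comments in that variant for the
        // step-by-step details, which are identical here.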
  5619  func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5620  	if doubleCheckMalloc {
  5621  		if gcphase == _GCmarktermination {
  5622  			throw("mallocgc called with gcphase == _GCmarktermination")
  5623  		}
  5624  	}
  5625  
  5626  	lockRankMayQueueFinalizer()
  5627  
  5628  	if debug.malloc {
  5629  		if x := preMallocgcDebug(size, typ); x != nil {
  5630  			return x
  5631  		}
  5632  	}
  5633  
  5634  	if gcBlackenEnabled != 0 {
  5635  		deductAssistCredit(size)
  5636  	}
  5637  
  5638  	const constsize = 11
  5639  
  5640  	const elemsize = 16
  5641  
  5642  	mp := acquirem()
  5643  	if doubleCheckMalloc {
  5644  		doubleCheckTiny(constsize, typ, mp)
  5645  	}
  5646  	mp.mallocing = 1
  5647  
  5648  	c := getMCache(mp)
  5649  	off := c.tinyoffset
  5650  
  5651  	if constsize&7 == 0 {
  5652  		off = alignUp(off, 8)
  5653  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5654  
  5655  		off = alignUp(off, 8)
  5656  	} else if constsize&3 == 0 {
  5657  		off = alignUp(off, 4)
  5658  	} else if constsize&1 == 0 {
  5659  		off = alignUp(off, 2)
  5660  	}
  5661  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5662  
  5663  		x := unsafe.Pointer(c.tiny + off)
  5664  		c.tinyoffset = off + constsize
  5665  		c.tinyAllocs++
  5666  		mp.mallocing = 0
  5667  		releasem(mp)
  5668  		const elemsize = 0
  5669  		{
  5670  
  5671  			if valgrindenabled {
  5672  				valgrindMalloc(x, size)
  5673  			}
  5674  
  5675  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5676  				if assistG := getg().m.curg; assistG != nil {
  5677  					assistG.gcAssistBytes -= int64(elemsize - size)
  5678  				}
  5679  			}
  5680  
  5681  			if debug.malloc {
  5682  				postMallocgcDebug(x, elemsize, typ)
  5683  			}
  5684  			return x
  5685  		}
  5686  
  5687  	}
  5688  
  5689  	checkGCTrigger := false
  5690  	span := c.alloc[tinySpanClass]
  5691  
  5692  	const nbytes = 8192
  5693  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5696  	var nextFreeFastResult gclinkptr
  5697  	if span.allocCache != 0 {
  5698  		theBit := sys.TrailingZeros64(span.allocCache)
  5699  		result := span.freeindex + uint16(theBit)
  5700  		if result < nelems {
  5701  			freeidx := result + 1
  5702  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5703  				span.allocCache >>= uint(theBit + 1)
  5704  				span.freeindex = freeidx
  5705  				span.allocCount++
  5706  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5709  			}
  5710  		}
  5711  	}
  5712  	v := nextFreeFastResult
  5713  	if v == 0 {
  5714  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5715  	}
  5716  	x := unsafe.Pointer(v)
  5717  	(*[2]uint64)(x)[0] = 0
  5718  	(*[2]uint64)(x)[1] = 0
  5719  
  5720  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5721  
  5722  		c.tiny = uintptr(x)
  5723  		c.tinyoffset = constsize
  5724  	}
  5725  
  5726  	publicationBarrier()
  5727  
  5728  	if writeBarrier.enabled {
  5729  
  5730  		gcmarknewobject(span, uintptr(x))
  5731  	} else {
  5732  
  5733  		span.freeIndexForScan = span.freeindex
  5734  	}
  5735  
  5736  	c.nextSample -= int64(elemsize)
  5737  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5738  		profilealloc(mp, x, elemsize)
  5739  	}
  5740  	mp.mallocing = 0
  5741  	releasem(mp)
  5742  
  5743  	if checkGCTrigger {
  5744  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5745  			gcStart(t)
  5746  		}
  5747  	}
  5748  
  5749  	if raceenabled {
  5750  
  5751  		x = add(x, elemsize-constsize)
  5752  	}
  5753  	if valgrindenabled {
  5754  		valgrindMalloc(x, size)
  5755  	}
  5756  
  5757  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5758  		if assistG := getg().m.curg; assistG != nil {
  5759  			assistG.gcAssistBytes -= int64(elemsize - size)
  5760  		}
  5761  	}
  5762  
  5763  	if debug.malloc {
  5764  		postMallocgcDebug(x, elemsize, typ)
  5765  	}
  5766  	return x
  5767  }
  5768  
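        // mallocTiny12 is the 12-byte analogue of mallocTiny11; only constsize
        // differs. Note that for this size the 32-bit 8-byte-alignment branch
        // (goarch.PtrSize == 4 && constsize == 12) is actually reachable.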
  5769  func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5770  	if doubleCheckMalloc {
  5771  		if gcphase == _GCmarktermination {
  5772  			throw("mallocgc called with gcphase == _GCmarktermination")
  5773  		}
  5774  	}
  5775  
  5776  	lockRankMayQueueFinalizer()
  5777  
  5778  	if debug.malloc {
  5779  		if x := preMallocgcDebug(size, typ); x != nil {
  5780  			return x
  5781  		}
  5782  	}
  5783  
  5784  	if gcBlackenEnabled != 0 {
  5785  		deductAssistCredit(size)
  5786  	}
  5787  
  5788  	const constsize = 12
  5789  
  5790  	const elemsize = 16
  5791  
  5792  	mp := acquirem()
  5793  	if doubleCheckMalloc {
  5794  		doubleCheckTiny(constsize, typ, mp)
  5795  	}
  5796  	mp.mallocing = 1
  5797  
  5798  	c := getMCache(mp)
  5799  	off := c.tinyoffset
  5800  
  5801  	if constsize&7 == 0 {
  5802  		off = alignUp(off, 8)
  5803  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5804  
  5805  		off = alignUp(off, 8)
  5806  	} else if constsize&3 == 0 {
  5807  		off = alignUp(off, 4)
  5808  	} else if constsize&1 == 0 {
  5809  		off = alignUp(off, 2)
  5810  	}
  5811  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5812  
  5813  		x := unsafe.Pointer(c.tiny + off)
  5814  		c.tinyoffset = off + constsize
  5815  		c.tinyAllocs++
  5816  		mp.mallocing = 0
  5817  		releasem(mp)
  5818  		const elemsize = 0
  5819  		{
  5820  
  5821  			if valgrindenabled {
  5822  				valgrindMalloc(x, size)
  5823  			}
  5824  
  5825  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5826  				if assistG := getg().m.curg; assistG != nil {
  5827  					assistG.gcAssistBytes -= int64(elemsize - size)
  5828  				}
  5829  			}
  5830  
  5831  			if debug.malloc {
  5832  				postMallocgcDebug(x, elemsize, typ)
  5833  			}
  5834  			return x
  5835  		}
  5836  
  5837  	}
  5838  
  5839  	checkGCTrigger := false
  5840  	span := c.alloc[tinySpanClass]
  5841  
  5842  	const nbytes = 8192
  5843  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5846  	var nextFreeFastResult gclinkptr
  5847  	if span.allocCache != 0 {
  5848  		theBit := sys.TrailingZeros64(span.allocCache)
  5849  		result := span.freeindex + uint16(theBit)
  5850  		if result < nelems {
  5851  			freeidx := result + 1
  5852  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5853  				span.allocCache >>= uint(theBit + 1)
  5854  				span.freeindex = freeidx
  5855  				span.allocCount++
  5856  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5859  			}
  5860  		}
  5861  	}
  5862  	v := nextFreeFastResult
  5863  	if v == 0 {
  5864  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5865  	}
  5866  	x := unsafe.Pointer(v)
  5867  	(*[2]uint64)(x)[0] = 0
  5868  	(*[2]uint64)(x)[1] = 0
  5869  
  5870  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5871  
  5872  		c.tiny = uintptr(x)
  5873  		c.tinyoffset = constsize
  5874  	}
  5875  
  5876  	publicationBarrier()
  5877  
  5878  	if writeBarrier.enabled {
  5879  
  5880  		gcmarknewobject(span, uintptr(x))
  5881  	} else {
  5882  
  5883  		span.freeIndexForScan = span.freeindex
  5884  	}
  5885  
  5886  	c.nextSample -= int64(elemsize)
  5887  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5888  		profilealloc(mp, x, elemsize)
  5889  	}
  5890  	mp.mallocing = 0
  5891  	releasem(mp)
  5892  
  5893  	if checkGCTrigger {
  5894  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5895  			gcStart(t)
  5896  		}
  5897  	}
  5898  
  5899  	if raceenabled {
  5900  
  5901  		x = add(x, elemsize-constsize)
  5902  	}
  5903  	if valgrindenabled {
  5904  		valgrindMalloc(x, size)
  5905  	}
  5906  
  5907  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5908  		if assistG := getg().m.curg; assistG != nil {
  5909  			assistG.gcAssistBytes -= int64(elemsize - size)
  5910  		}
  5911  	}
  5912  
  5913  	if debug.malloc {
  5914  		postMallocgcDebug(x, elemsize, typ)
  5915  	}
  5916  	return x
  5917  }
  5918  
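        // mallocTiny13 is the 13-byte analogue of mallocTiny11.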
  5919  func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5920  	if doubleCheckMalloc {
  5921  		if gcphase == _GCmarktermination {
  5922  			throw("mallocgc called with gcphase == _GCmarktermination")
  5923  		}
  5924  	}
  5925  
  5926  	lockRankMayQueueFinalizer()
  5927  
  5928  	if debug.malloc {
  5929  		if x := preMallocgcDebug(size, typ); x != nil {
  5930  			return x
  5931  		}
  5932  	}
  5933  
  5934  	if gcBlackenEnabled != 0 {
  5935  		deductAssistCredit(size)
  5936  	}
  5937  
  5938  	const constsize = 13
  5939  
  5940  	const elemsize = 16
  5941  
  5942  	mp := acquirem()
  5943  	if doubleCheckMalloc {
  5944  		doubleCheckTiny(constsize, typ, mp)
  5945  	}
  5946  	mp.mallocing = 1
  5947  
  5948  	c := getMCache(mp)
  5949  	off := c.tinyoffset
  5950  
  5951  	if constsize&7 == 0 {
  5952  		off = alignUp(off, 8)
  5953  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5954  
  5955  		off = alignUp(off, 8)
  5956  	} else if constsize&3 == 0 {
  5957  		off = alignUp(off, 4)
  5958  	} else if constsize&1 == 0 {
  5959  		off = alignUp(off, 2)
  5960  	}
  5961  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5962  
  5963  		x := unsafe.Pointer(c.tiny + off)
  5964  		c.tinyoffset = off + constsize
  5965  		c.tinyAllocs++
  5966  		mp.mallocing = 0
  5967  		releasem(mp)
  5968  		const elemsize = 0
  5969  		{
  5970  
  5971  			if valgrindenabled {
  5972  				valgrindMalloc(x, size)
  5973  			}
  5974  
  5975  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5976  				if assistG := getg().m.curg; assistG != nil {
  5977  					assistG.gcAssistBytes -= int64(elemsize - size)
  5978  				}
  5979  			}
  5980  
  5981  			if debug.malloc {
  5982  				postMallocgcDebug(x, elemsize, typ)
  5983  			}
  5984  			return x
  5985  		}
  5986  
  5987  	}
  5988  
  5989  	checkGCTrigger := false
  5990  	span := c.alloc[tinySpanClass]
  5991  
  5992  	const nbytes = 8192
  5993  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5996  	var nextFreeFastResult gclinkptr
  5997  	if span.allocCache != 0 {
  5998  		theBit := sys.TrailingZeros64(span.allocCache)
  5999  		result := span.freeindex + uint16(theBit)
  6000  		if result < nelems {
  6001  			freeidx := result + 1
  6002  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6003  				span.allocCache >>= uint(theBit + 1)
  6004  				span.freeindex = freeidx
  6005  				span.allocCount++
  6006  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6009  			}
  6010  		}
  6011  	}
  6012  	v := nextFreeFastResult
  6013  	if v == 0 {
  6014  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6015  	}
  6016  	x := unsafe.Pointer(v)
  6017  	(*[2]uint64)(x)[0] = 0
  6018  	(*[2]uint64)(x)[1] = 0
  6019  
  6020  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6021  
  6022  		c.tiny = uintptr(x)
  6023  		c.tinyoffset = constsize
  6024  	}
  6025  
  6026  	publicationBarrier()
  6027  
  6028  	if writeBarrier.enabled {
  6029  
  6030  		gcmarknewobject(span, uintptr(x))
  6031  	} else {
  6032  
  6033  		span.freeIndexForScan = span.freeindex
  6034  	}
  6035  
  6036  	c.nextSample -= int64(elemsize)
  6037  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6038  		profilealloc(mp, x, elemsize)
  6039  	}
  6040  	mp.mallocing = 0
  6041  	releasem(mp)
  6042  
  6043  	if checkGCTrigger {
  6044  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6045  			gcStart(t)
  6046  		}
  6047  	}
  6048  
  6049  	if raceenabled {
  6050  
  6051  		x = add(x, elemsize-constsize)
  6052  	}
  6053  	if valgrindenabled {
  6054  		valgrindMalloc(x, size)
  6055  	}
  6056  
  6057  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6058  		if assistG := getg().m.curg; assistG != nil {
  6059  			assistG.gcAssistBytes -= int64(elemsize - size)
  6060  		}
  6061  	}
  6062  
  6063  	if debug.malloc {
  6064  		postMallocgcDebug(x, elemsize, typ)
  6065  	}
  6066  	return x
  6067  }
  6068  
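        // mallocTiny14 is the 14-byte analogue of mallocTiny11.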
  6069  func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6070  	if doubleCheckMalloc {
  6071  		if gcphase == _GCmarktermination {
  6072  			throw("mallocgc called with gcphase == _GCmarktermination")
  6073  		}
  6074  	}
  6075  
  6076  	lockRankMayQueueFinalizer()
  6077  
  6078  	if debug.malloc {
  6079  		if x := preMallocgcDebug(size, typ); x != nil {
  6080  			return x
  6081  		}
  6082  	}
  6083  
  6084  	if gcBlackenEnabled != 0 {
  6085  		deductAssistCredit(size)
  6086  	}
  6087  
  6088  	const constsize = 14
  6089  
  6090  	const elemsize = 16
  6091  
  6092  	mp := acquirem()
  6093  	if doubleCheckMalloc {
  6094  		doubleCheckTiny(constsize, typ, mp)
  6095  	}
  6096  	mp.mallocing = 1
  6097  
  6098  	c := getMCache(mp)
  6099  	off := c.tinyoffset
  6100  
  6101  	if constsize&7 == 0 {
  6102  		off = alignUp(off, 8)
  6103  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6104  
  6105  		off = alignUp(off, 8)
  6106  	} else if constsize&3 == 0 {
  6107  		off = alignUp(off, 4)
  6108  	} else if constsize&1 == 0 {
  6109  		off = alignUp(off, 2)
  6110  	}
  6111  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6112  
  6113  		x := unsafe.Pointer(c.tiny + off)
  6114  		c.tinyoffset = off + constsize
  6115  		c.tinyAllocs++
  6116  		mp.mallocing = 0
  6117  		releasem(mp)
  6118  		const elemsize = 0
  6119  		{
  6120  
  6121  			if valgrindenabled {
  6122  				valgrindMalloc(x, size)
  6123  			}
  6124  
  6125  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6126  				if assistG := getg().m.curg; assistG != nil {
  6127  					assistG.gcAssistBytes -= int64(elemsize - size)
  6128  				}
  6129  			}
  6130  
  6131  			if debug.malloc {
  6132  				postMallocgcDebug(x, elemsize, typ)
  6133  			}
  6134  			return x
  6135  		}
  6136  
  6137  	}
  6138  
  6139  	checkGCTrigger := false
  6140  	span := c.alloc[tinySpanClass]
  6141  
  6142  	const nbytes = 8192
  6143  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6146  	var nextFreeFastResult gclinkptr
  6147  	if span.allocCache != 0 {
  6148  		theBit := sys.TrailingZeros64(span.allocCache)
  6149  		result := span.freeindex + uint16(theBit)
  6150  		if result < nelems {
  6151  			freeidx := result + 1
  6152  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6153  				span.allocCache >>= uint(theBit + 1)
  6154  				span.freeindex = freeidx
  6155  				span.allocCount++
  6156  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6159  			}
  6160  		}
  6161  	}
  6162  	v := nextFreeFastResult
  6163  	if v == 0 {
  6164  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6165  	}
  6166  	x := unsafe.Pointer(v)
  6167  	(*[2]uint64)(x)[0] = 0
  6168  	(*[2]uint64)(x)[1] = 0
  6169  
  6170  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6171  
  6172  		c.tiny = uintptr(x)
  6173  		c.tinyoffset = constsize
  6174  	}
  6175  
  6176  	publicationBarrier()
  6177  
  6178  	if writeBarrier.enabled {
  6179  
  6180  		gcmarknewobject(span, uintptr(x))
  6181  	} else {
  6182  
  6183  		span.freeIndexForScan = span.freeindex
  6184  	}
  6185  
  6186  	c.nextSample -= int64(elemsize)
  6187  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6188  		profilealloc(mp, x, elemsize)
  6189  	}
  6190  	mp.mallocing = 0
  6191  	releasem(mp)
  6192  
  6193  	if checkGCTrigger {
  6194  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6195  			gcStart(t)
  6196  		}
  6197  	}
  6198  
  6199  	if raceenabled {
  6200  
  6201  		x = add(x, elemsize-constsize)
  6202  	}
  6203  	if valgrindenabled {
  6204  		valgrindMalloc(x, size)
  6205  	}
  6206  
  6207  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6208  		if assistG := getg().m.curg; assistG != nil {
  6209  			assistG.gcAssistBytes -= int64(elemsize - size)
  6210  		}
  6211  	}
  6212  
  6213  	if debug.malloc {
  6214  		postMallocgcDebug(x, elemsize, typ)
  6215  	}
  6216  	return x
  6217  }
  6218  
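        // mallocTiny15 is the 15-byte analogue of mallocTiny11.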
  6219  func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6220  	if doubleCheckMalloc {
  6221  		if gcphase == _GCmarktermination {
  6222  			throw("mallocgc called with gcphase == _GCmarktermination")
  6223  		}
  6224  	}
  6225  
  6226  	lockRankMayQueueFinalizer()
  6227  
  6228  	if debug.malloc {
  6229  		if x := preMallocgcDebug(size, typ); x != nil {
  6230  			return x
  6231  		}
  6232  	}
  6233  
  6234  	if gcBlackenEnabled != 0 {
  6235  		deductAssistCredit(size)
  6236  	}
  6237  
  6238  	const constsize = 15
  6239  
  6240  	const elemsize = 16
  6241  
  6242  	mp := acquirem()
  6243  	if doubleCheckMalloc {
  6244  		doubleCheckTiny(constsize, typ, mp)
  6245  	}
  6246  	mp.mallocing = 1
  6247  
  6248  	c := getMCache(mp)
  6249  	off := c.tinyoffset
  6250  
  6251  	if constsize&7 == 0 {
  6252  		off = alignUp(off, 8)
  6253  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6254  
  6255  		off = alignUp(off, 8)
  6256  	} else if constsize&3 == 0 {
  6257  		off = alignUp(off, 4)
  6258  	} else if constsize&1 == 0 {
  6259  		off = alignUp(off, 2)
  6260  	}
  6261  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6262  
  6263  		x := unsafe.Pointer(c.tiny + off)
  6264  		c.tinyoffset = off + constsize
  6265  		c.tinyAllocs++
  6266  		mp.mallocing = 0
  6267  		releasem(mp)
  6268  		const elemsize = 0
  6269  		{
  6270  
  6271  			if valgrindenabled {
  6272  				valgrindMalloc(x, size)
  6273  			}
  6274  
  6275  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6276  				if assistG := getg().m.curg; assistG != nil {
  6277  					assistG.gcAssistBytes -= int64(elemsize - size)
  6278  				}
  6279  			}
  6280  
  6281  			if debug.malloc {
  6282  				postMallocgcDebug(x, elemsize, typ)
  6283  			}
  6284  			return x
  6285  		}
  6286  
  6287  	}
  6288  
  6289  	checkGCTrigger := false
  6290  	span := c.alloc[tinySpanClass]
  6291  
  6292  	const nbytes = 8192
  6293  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6296  	var nextFreeFastResult gclinkptr
  6297  	if span.allocCache != 0 {
  6298  		theBit := sys.TrailingZeros64(span.allocCache)
  6299  		result := span.freeindex + uint16(theBit)
  6300  		if result < nelems {
  6301  			freeidx := result + 1
  6302  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6303  				span.allocCache >>= uint(theBit + 1)
  6304  				span.freeindex = freeidx
  6305  				span.allocCount++
  6306  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6309  			}
  6310  		}
  6311  	}
  6312  	v := nextFreeFastResult
  6313  	if v == 0 {
  6314  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6315  	}
  6316  	x := unsafe.Pointer(v)
  6317  	(*[2]uint64)(x)[0] = 0
  6318  	(*[2]uint64)(x)[1] = 0
  6319  
  6320  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6321  
  6322  		c.tiny = uintptr(x)
  6323  		c.tinyoffset = constsize
  6324  	}
  6325  
  6326  	publicationBarrier()
  6327  
  6328  	if writeBarrier.enabled {
  6329  
  6330  		gcmarknewobject(span, uintptr(x))
  6331  	} else {
  6332  
  6333  		span.freeIndexForScan = span.freeindex
  6334  	}
  6335  
  6336  	c.nextSample -= int64(elemsize)
  6337  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6338  		profilealloc(mp, x, elemsize)
  6339  	}
  6340  	mp.mallocing = 0
  6341  	releasem(mp)
  6342  
  6343  	if checkGCTrigger {
  6344  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6345  			gcStart(t)
  6346  		}
  6347  	}
  6348  
  6349  	if raceenabled {
  6350  
  6351  		x = add(x, elemsize-constsize)
  6352  	}
  6353  	if valgrindenabled {
  6354  		valgrindMalloc(x, size)
  6355  	}
  6356  
  6357  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6358  		if assistG := getg().m.curg; assistG != nil {
  6359  			assistG.gcAssistBytes -= int64(elemsize - size)
  6360  		}
  6361  	}
  6362  
  6363  	if debug.malloc {
  6364  		postMallocgcDebug(x, elemsize, typ)
  6365  	}
  6366  	return x
  6367  }
  6368  
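        // mallocgcSmallNoScanSC2 is the mkmalloc-generated allocator for small
        // pointer-free (noscan) objects in size class 2, i.e. 16-byte elements.
        // The generated body: claim an element from the per-P mcache span for
        // the noscan span class (inlined nextFreeFast, with c.nextFree as the
        // slow path), zero it only when both the caller and the span demand it,
        // publish the object, and settle GC assist, profiling, and sanitizer
        // accounting. The SC3–SC16 variants below differ only in sizeclass and
        // elemsize.
        //
        // Callers presumably reach these specializations through a size-class
        // switch in mallocgc; a hypothetical sketch, not part of this file:
        //
        //	switch sizeclass { // assumed noscan size-to-class lookup
        //	case 2:
        //		return mallocgcSmallNoScanSC2(size, typ, needzero)
        //	case 3:
        //		return mallocgcSmallNoScanSC3(size, typ, needzero)
        //	// ...
        //	}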
  6369  func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6370  	if doubleCheckMalloc {
  6371  		if gcphase == _GCmarktermination {
  6372  			throw("mallocgc called with gcphase == _GCmarktermination")
  6373  		}
  6374  	}
  6375  
  6376  	lockRankMayQueueFinalizer()
  6377  
  6378  	if debug.malloc {
  6379  		if x := preMallocgcDebug(size, typ); x != nil {
  6380  			return x
  6381  		}
  6382  	}
  6383  
  6384  	if gcBlackenEnabled != 0 {
  6385  		deductAssistCredit(size)
  6386  	}
  6387  
  6388  	const sizeclass = 2
  6389  
  6390  	const elemsize = 16
  6391  
  6392  	mp := acquirem()
  6393  	if doubleCheckMalloc {
  6394  		doubleCheckSmallNoScan(typ, mp)
  6395  	}
  6396  	mp.mallocing = 1
  6397  
  6398  	checkGCTrigger := false
  6399  	c := getMCache(mp)
  6400  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6401  	span := c.alloc[spc]
  6402  
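        	// Inlined nextFreeFast: consult the span's 64-bit allocCache for a
        	// free element; on a miss, c.nextFree refills the cache and may
        	// request a GC trigger check via checkGCTrigger.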
  6403  	var nextFreeFastResult gclinkptr
  6404  	if span.allocCache != 0 {
  6405  		theBit := sys.TrailingZeros64(span.allocCache)
  6406  		result := span.freeindex + uint16(theBit)
  6407  		if result < span.nelems {
  6408  			freeidx := result + 1
  6409  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6410  				span.allocCache >>= uint(theBit + 1)
  6411  				span.freeindex = freeidx
  6412  				span.allocCount++
  6413  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6416  			}
  6417  		}
  6418  	}
  6419  	v := nextFreeFastResult
  6420  	if v == 0 {
  6421  		v, span, checkGCTrigger = c.nextFree(spc)
  6422  	}
  6423  	x := unsafe.Pointer(v)
  6424  	if needzero && span.needzero != 0 {
  6425  		memclrNoHeapPointers(x, elemsize)
  6426  	}
  6427  
  6428  	publicationBarrier()
  6429  
  6430  	if writeBarrier.enabled {
  6431  
  6432  		gcmarknewobject(span, uintptr(x))
  6433  	} else {
  6434  
  6435  		span.freeIndexForScan = span.freeindex
  6436  	}
  6437  
  6438  	c.nextSample -= int64(elemsize)
  6439  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6440  		profilealloc(mp, x, elemsize)
  6441  	}
  6442  	mp.mallocing = 0
  6443  	releasem(mp)
  6444  
  6445  	if checkGCTrigger {
  6446  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6447  			gcStart(t)
  6448  		}
  6449  	}
  6450  	if valgrindenabled {
  6451  		valgrindMalloc(x, size)
  6452  	}
  6453  
  6454  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6455  		if assistG := getg().m.curg; assistG != nil {
  6456  			assistG.gcAssistBytes -= int64(elemsize - size)
  6457  		}
  6458  	}
  6459  
  6460  	if debug.malloc {
  6461  		postMallocgcDebug(x, elemsize, typ)
  6462  	}
  6463  	return x
  6464  }
  6465  
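        // mallocgcSmallNoScanSC3 is mallocgcSmallNoScanSC2 specialized for size
        // class 3 (24-byte elements).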
  6466  func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6467  	if doubleCheckMalloc {
  6468  		if gcphase == _GCmarktermination {
  6469  			throw("mallocgc called with gcphase == _GCmarktermination")
  6470  		}
  6471  	}
  6472  
  6473  	lockRankMayQueueFinalizer()
  6474  
  6475  	if debug.malloc {
  6476  		if x := preMallocgcDebug(size, typ); x != nil {
  6477  			return x
  6478  		}
  6479  	}
  6480  
  6481  	if gcBlackenEnabled != 0 {
  6482  		deductAssistCredit(size)
  6483  	}
  6484  
  6485  	const sizeclass = 3
  6486  
  6487  	const elemsize = 24
  6488  
  6489  	mp := acquirem()
  6490  	if doubleCheckMalloc {
  6491  		doubleCheckSmallNoScan(typ, mp)
  6492  	}
  6493  	mp.mallocing = 1
  6494  
  6495  	checkGCTrigger := false
  6496  	c := getMCache(mp)
  6497  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6498  	span := c.alloc[spc]
  6499  
  6500  	var nextFreeFastResult gclinkptr
  6501  	if span.allocCache != 0 {
  6502  		theBit := sys.TrailingZeros64(span.allocCache)
  6503  		result := span.freeindex + uint16(theBit)
  6504  		if result < span.nelems {
  6505  			freeidx := result + 1
  6506  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6507  				span.allocCache >>= uint(theBit + 1)
  6508  				span.freeindex = freeidx
  6509  				span.allocCount++
  6510  				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
  6513  			}
  6514  		}
  6515  	}
  6516  	v := nextFreeFastResult
  6517  	if v == 0 {
  6518  		v, span, checkGCTrigger = c.nextFree(spc)
  6519  	}
  6520  	x := unsafe.Pointer(v)
  6521  	if needzero && span.needzero != 0 {
  6522  		memclrNoHeapPointers(x, elemsize)
  6523  	}
  6524  
  6525  	publicationBarrier()
  6526  
  6527  	if writeBarrier.enabled {
  6528  
  6529  		gcmarknewobject(span, uintptr(x))
  6530  	} else {
  6531  
  6532  		span.freeIndexForScan = span.freeindex
  6533  	}
  6534  
  6535  	c.nextSample -= int64(elemsize)
  6536  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6537  		profilealloc(mp, x, elemsize)
  6538  	}
  6539  	mp.mallocing = 0
  6540  	releasem(mp)
  6541  
  6542  	if checkGCTrigger {
  6543  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6544  			gcStart(t)
  6545  		}
  6546  	}
  6547  	if valgrindenabled {
  6548  		valgrindMalloc(x, size)
  6549  	}
  6550  
  6551  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6552  		if assistG := getg().m.curg; assistG != nil {
  6553  			assistG.gcAssistBytes -= int64(elemsize - size)
  6554  		}
  6555  	}
  6556  
  6557  	if debug.malloc {
  6558  		postMallocgcDebug(x, elemsize, typ)
  6559  	}
  6560  	return x
  6561  }
  6562  
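        // mallocgcSmallNoScanSC4 is mallocgcSmallNoScanSC2 specialized for size
        // class 4 (32-byte elements).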
  6563  func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6564  	if doubleCheckMalloc {
  6565  		if gcphase == _GCmarktermination {
  6566  			throw("mallocgc called with gcphase == _GCmarktermination")
  6567  		}
  6568  	}
  6569  
  6570  	lockRankMayQueueFinalizer()
  6571  
  6572  	if debug.malloc {
  6573  		if x := preMallocgcDebug(size, typ); x != nil {
  6574  			return x
  6575  		}
  6576  	}
  6577  
  6578  	if gcBlackenEnabled != 0 {
  6579  		deductAssistCredit(size)
  6580  	}
  6581  
  6582  	const sizeclass = 4
  6583  
  6584  	const elemsize = 32
  6585  
  6586  	mp := acquirem()
  6587  	if doubleCheckMalloc {
  6588  		doubleCheckSmallNoScan(typ, mp)
  6589  	}
  6590  	mp.mallocing = 1
  6591  
  6592  	checkGCTrigger := false
  6593  	c := getMCache(mp)
  6594  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6595  	span := c.alloc[spc]
  6596  
  6597  	var nextFreeFastResult gclinkptr
  6598  	if span.allocCache != 0 {
  6599  		theBit := sys.TrailingZeros64(span.allocCache)
  6600  		result := span.freeindex + uint16(theBit)
  6601  		if result < span.nelems {
  6602  			freeidx := result + 1
  6603  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6604  				span.allocCache >>= uint(theBit + 1)
  6605  				span.freeindex = freeidx
  6606  				span.allocCount++
  6607  				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
  6610  			}
  6611  		}
  6612  	}
  6613  	v := nextFreeFastResult
  6614  	if v == 0 {
  6615  		v, span, checkGCTrigger = c.nextFree(spc)
  6616  	}
  6617  	x := unsafe.Pointer(v)
  6618  	if needzero && span.needzero != 0 {
  6619  		memclrNoHeapPointers(x, elemsize)
  6620  	}
  6621  
  6622  	publicationBarrier()
  6623  
  6624  	if writeBarrier.enabled {
  6625  
  6626  		gcmarknewobject(span, uintptr(x))
  6627  	} else {
  6628  
  6629  		span.freeIndexForScan = span.freeindex
  6630  	}
  6631  
  6632  	c.nextSample -= int64(elemsize)
  6633  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6634  		profilealloc(mp, x, elemsize)
  6635  	}
  6636  	mp.mallocing = 0
  6637  	releasem(mp)
  6638  
  6639  	if checkGCTrigger {
  6640  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6641  			gcStart(t)
  6642  		}
  6643  	}
  6644  	if valgrindenabled {
  6645  		valgrindMalloc(x, size)
  6646  	}
  6647  
  6648  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6649  		if assistG := getg().m.curg; assistG != nil {
  6650  			assistG.gcAssistBytes -= int64(elemsize - size)
  6651  		}
  6652  	}
  6653  
  6654  	if debug.malloc {
  6655  		postMallocgcDebug(x, elemsize, typ)
  6656  	}
  6657  	return x
  6658  }
  6659  
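        // mallocgcSmallNoScanSC5 is mallocgcSmallNoScanSC2 specialized for size
        // class 5 (48-byte elements).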
  6660  func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6661  	if doubleCheckMalloc {
  6662  		if gcphase == _GCmarktermination {
  6663  			throw("mallocgc called with gcphase == _GCmarktermination")
  6664  		}
  6665  	}
  6666  
  6667  	lockRankMayQueueFinalizer()
  6668  
  6669  	if debug.malloc {
  6670  		if x := preMallocgcDebug(size, typ); x != nil {
  6671  			return x
  6672  		}
  6673  	}
  6674  
  6675  	if gcBlackenEnabled != 0 {
  6676  		deductAssistCredit(size)
  6677  	}
  6678  
  6679  	const sizeclass = 5
  6680  
  6681  	const elemsize = 48
  6682  
  6683  	mp := acquirem()
  6684  	if doubleCheckMalloc {
  6685  		doubleCheckSmallNoScan(typ, mp)
  6686  	}
  6687  	mp.mallocing = 1
  6688  
  6689  	checkGCTrigger := false
  6690  	c := getMCache(mp)
  6691  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6692  	span := c.alloc[spc]
  6693  
  6694  	var nextFreeFastResult gclinkptr
  6695  	if span.allocCache != 0 {
  6696  		theBit := sys.TrailingZeros64(span.allocCache)
  6697  		result := span.freeindex + uint16(theBit)
  6698  		if result < span.nelems {
  6699  			freeidx := result + 1
  6700  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6701  				span.allocCache >>= uint(theBit + 1)
  6702  				span.freeindex = freeidx
  6703  				span.allocCount++
  6704  				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
  6707  			}
  6708  		}
  6709  	}
  6710  	v := nextFreeFastResult
  6711  	if v == 0 {
  6712  		v, span, checkGCTrigger = c.nextFree(spc)
  6713  	}
  6714  	x := unsafe.Pointer(v)
  6715  	if needzero && span.needzero != 0 {
  6716  		memclrNoHeapPointers(x, elemsize)
  6717  	}
  6718  
  6719  	publicationBarrier()
  6720  
  6721  	if writeBarrier.enabled {
  6722  
  6723  		gcmarknewobject(span, uintptr(x))
  6724  	} else {
  6725  
  6726  		span.freeIndexForScan = span.freeindex
  6727  	}
  6728  
  6729  	c.nextSample -= int64(elemsize)
  6730  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6731  		profilealloc(mp, x, elemsize)
  6732  	}
  6733  	mp.mallocing = 0
  6734  	releasem(mp)
  6735  
  6736  	if checkGCTrigger {
  6737  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6738  			gcStart(t)
  6739  		}
  6740  	}
  6741  	if valgrindenabled {
  6742  		valgrindMalloc(x, size)
  6743  	}
  6744  
  6745  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6746  		if assistG := getg().m.curg; assistG != nil {
  6747  			assistG.gcAssistBytes -= int64(elemsize - size)
  6748  		}
  6749  	}
  6750  
  6751  	if debug.malloc {
  6752  		postMallocgcDebug(x, elemsize, typ)
  6753  	}
  6754  	return x
  6755  }
  6756  
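        // mallocgcSmallNoScanSC6 is mallocgcSmallNoScanSC2 specialized for size
        // class 6 (64-byte elements).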
  6757  func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6758  	if doubleCheckMalloc {
  6759  		if gcphase == _GCmarktermination {
  6760  			throw("mallocgc called with gcphase == _GCmarktermination")
  6761  		}
  6762  	}
  6763  
  6764  	lockRankMayQueueFinalizer()
  6765  
  6766  	if debug.malloc {
  6767  		if x := preMallocgcDebug(size, typ); x != nil {
  6768  			return x
  6769  		}
  6770  	}
  6771  
  6772  	if gcBlackenEnabled != 0 {
  6773  		deductAssistCredit(size)
  6774  	}
  6775  
  6776  	const sizeclass = 6
  6777  
  6778  	const elemsize = 64
  6779  
  6780  	mp := acquirem()
  6781  	if doubleCheckMalloc {
  6782  		doubleCheckSmallNoScan(typ, mp)
  6783  	}
  6784  	mp.mallocing = 1
  6785  
  6786  	checkGCTrigger := false
  6787  	c := getMCache(mp)
  6788  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6789  	span := c.alloc[spc]
  6790  
  6791  	var nextFreeFastResult gclinkptr
  6792  	if span.allocCache != 0 {
  6793  		theBit := sys.TrailingZeros64(span.allocCache)
  6794  		result := span.freeindex + uint16(theBit)
  6795  		if result < span.nelems {
  6796  			freeidx := result + 1
  6797  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6798  				span.allocCache >>= uint(theBit + 1)
  6799  				span.freeindex = freeidx
  6800  				span.allocCount++
  6801  				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
  6804  			}
  6805  		}
  6806  	}
  6807  	v := nextFreeFastResult
  6808  	if v == 0 {
  6809  		v, span, checkGCTrigger = c.nextFree(spc)
  6810  	}
  6811  	x := unsafe.Pointer(v)
  6812  	if needzero && span.needzero != 0 {
  6813  		memclrNoHeapPointers(x, elemsize)
  6814  	}
  6815  
  6816  	publicationBarrier()
  6817  
  6818  	if writeBarrier.enabled {
  6819  
  6820  		gcmarknewobject(span, uintptr(x))
  6821  	} else {
  6822  
  6823  		span.freeIndexForScan = span.freeindex
  6824  	}
  6825  
  6826  	c.nextSample -= int64(elemsize)
  6827  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6828  		profilealloc(mp, x, elemsize)
  6829  	}
  6830  	mp.mallocing = 0
  6831  	releasem(mp)
  6832  
  6833  	if checkGCTrigger {
  6834  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6835  			gcStart(t)
  6836  		}
  6837  	}
  6838  	if valgrindenabled {
  6839  		valgrindMalloc(x, size)
  6840  	}
  6841  
  6842  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6843  		if assistG := getg().m.curg; assistG != nil {
  6844  			assistG.gcAssistBytes -= int64(elemsize - size)
  6845  		}
  6846  	}
  6847  
  6848  	if debug.malloc {
  6849  		postMallocgcDebug(x, elemsize, typ)
  6850  	}
  6851  	return x
  6852  }
  6853  
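        // mallocgcSmallNoScanSC7 is mallocgcSmallNoScanSC2 specialized for size
        // class 7 (80-byte elements).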
  6854  func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6855  	if doubleCheckMalloc {
  6856  		if gcphase == _GCmarktermination {
  6857  			throw("mallocgc called with gcphase == _GCmarktermination")
  6858  		}
  6859  	}
  6860  
  6861  	lockRankMayQueueFinalizer()
  6862  
  6863  	if debug.malloc {
  6864  		if x := preMallocgcDebug(size, typ); x != nil {
  6865  			return x
  6866  		}
  6867  	}
  6868  
  6869  	if gcBlackenEnabled != 0 {
  6870  		deductAssistCredit(size)
  6871  	}
  6872  
  6873  	const sizeclass = 7
  6874  
  6875  	const elemsize = 80
  6876  
  6877  	mp := acquirem()
  6878  	if doubleCheckMalloc {
  6879  		doubleCheckSmallNoScan(typ, mp)
  6880  	}
  6881  	mp.mallocing = 1
  6882  
  6883  	checkGCTrigger := false
  6884  	c := getMCache(mp)
  6885  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6886  	span := c.alloc[spc]
  6887  
  6888  	var nextFreeFastResult gclinkptr
  6889  	if span.allocCache != 0 {
  6890  		theBit := sys.TrailingZeros64(span.allocCache)
  6891  		result := span.freeindex + uint16(theBit)
  6892  		if result < span.nelems {
  6893  			freeidx := result + 1
  6894  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6895  				span.allocCache >>= uint(theBit + 1)
  6896  				span.freeindex = freeidx
  6897  				span.allocCount++
  6898  				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
  6901  			}
  6902  		}
  6903  	}
  6904  	v := nextFreeFastResult
  6905  	if v == 0 {
  6906  		v, span, checkGCTrigger = c.nextFree(spc)
  6907  	}
  6908  	x := unsafe.Pointer(v)
  6909  	if needzero && span.needzero != 0 {
  6910  		memclrNoHeapPointers(x, elemsize)
  6911  	}
  6912  
  6913  	publicationBarrier()
  6914  
  6915  	if writeBarrier.enabled {
  6916  
  6917  		gcmarknewobject(span, uintptr(x))
  6918  	} else {
  6919  
  6920  		span.freeIndexForScan = span.freeindex
  6921  	}
  6922  
  6923  	c.nextSample -= int64(elemsize)
  6924  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6925  		profilealloc(mp, x, elemsize)
  6926  	}
  6927  	mp.mallocing = 0
  6928  	releasem(mp)
  6929  
  6930  	if checkGCTrigger {
  6931  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6932  			gcStart(t)
  6933  		}
  6934  	}
  6935  	if valgrindenabled {
  6936  		valgrindMalloc(x, size)
  6937  	}
  6938  
  6939  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6940  		if assistG := getg().m.curg; assistG != nil {
  6941  			assistG.gcAssistBytes -= int64(elemsize - size)
  6942  		}
  6943  	}
  6944  
  6945  	if debug.malloc {
  6946  		postMallocgcDebug(x, elemsize, typ)
  6947  	}
  6948  	return x
  6949  }
  6950  
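        // mallocgcSmallNoScanSC8 is mallocgcSmallNoScanSC2 specialized for size
        // class 8 (96-byte elements).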
  6951  func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6952  	if doubleCheckMalloc {
  6953  		if gcphase == _GCmarktermination {
  6954  			throw("mallocgc called with gcphase == _GCmarktermination")
  6955  		}
  6956  	}
  6957  
  6958  	lockRankMayQueueFinalizer()
  6959  
  6960  	if debug.malloc {
  6961  		if x := preMallocgcDebug(size, typ); x != nil {
  6962  			return x
  6963  		}
  6964  	}
  6965  
  6966  	if gcBlackenEnabled != 0 {
  6967  		deductAssistCredit(size)
  6968  	}
  6969  
  6970  	const sizeclass = 8
  6971  
  6972  	const elemsize = 96
  6973  
  6974  	mp := acquirem()
  6975  	if doubleCheckMalloc {
  6976  		doubleCheckSmallNoScan(typ, mp)
  6977  	}
  6978  	mp.mallocing = 1
  6979  
  6980  	checkGCTrigger := false
  6981  	c := getMCache(mp)
  6982  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6983  	span := c.alloc[spc]
  6984  
  6985  	var nextFreeFastResult gclinkptr
  6986  	if span.allocCache != 0 {
  6987  		theBit := sys.TrailingZeros64(span.allocCache)
  6988  		result := span.freeindex + uint16(theBit)
  6989  		if result < span.nelems {
  6990  			freeidx := result + 1
  6991  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6992  				span.allocCache >>= uint(theBit + 1)
  6993  				span.freeindex = freeidx
  6994  				span.allocCount++
  6995  				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
  6998  			}
  6999  		}
  7000  	}
  7001  	v := nextFreeFastResult
  7002  	if v == 0 {
  7003  		v, span, checkGCTrigger = c.nextFree(spc)
  7004  	}
  7005  	x := unsafe.Pointer(v)
  7006  	if needzero && span.needzero != 0 {
  7007  		memclrNoHeapPointers(x, elemsize)
  7008  	}
  7009  
  7010  	publicationBarrier()
  7011  
  7012  	if writeBarrier.enabled {
  7013  
  7014  		gcmarknewobject(span, uintptr(x))
  7015  	} else {
  7016  
  7017  		span.freeIndexForScan = span.freeindex
  7018  	}
  7019  
  7020  	c.nextSample -= int64(elemsize)
  7021  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7022  		profilealloc(mp, x, elemsize)
  7023  	}
  7024  	mp.mallocing = 0
  7025  	releasem(mp)
  7026  
  7027  	if checkGCTrigger {
  7028  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7029  			gcStart(t)
  7030  		}
  7031  	}
  7032  	if valgrindenabled {
  7033  		valgrindMalloc(x, size)
  7034  	}
  7035  
  7036  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7037  		if assistG := getg().m.curg; assistG != nil {
  7038  			assistG.gcAssistBytes -= int64(elemsize - size)
  7039  		}
  7040  	}
  7041  
  7042  	if debug.malloc {
  7043  		postMallocgcDebug(x, elemsize, typ)
  7044  	}
  7045  	return x
  7046  }
  7047  
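        // mallocgcSmallNoScanSC9 is mallocgcSmallNoScanSC2 specialized for size
        // class 9 (112-byte elements).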
  7048  func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7049  	if doubleCheckMalloc {
  7050  		if gcphase == _GCmarktermination {
  7051  			throw("mallocgc called with gcphase == _GCmarktermination")
  7052  		}
  7053  	}
  7054  
  7055  	lockRankMayQueueFinalizer()
  7056  
  7057  	if debug.malloc {
  7058  		if x := preMallocgcDebug(size, typ); x != nil {
  7059  			return x
  7060  		}
  7061  	}
  7062  
  7063  	if gcBlackenEnabled != 0 {
  7064  		deductAssistCredit(size)
  7065  	}
  7066  
  7067  	const sizeclass = 9
  7068  
  7069  	const elemsize = 112
  7070  
  7071  	mp := acquirem()
  7072  	if doubleCheckMalloc {
  7073  		doubleCheckSmallNoScan(typ, mp)
  7074  	}
  7075  	mp.mallocing = 1
  7076  
  7077  	checkGCTrigger := false
  7078  	c := getMCache(mp)
  7079  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7080  	span := c.alloc[spc]
  7081  
  7082  	var nextFreeFastResult gclinkptr
  7083  	if span.allocCache != 0 {
  7084  		theBit := sys.TrailingZeros64(span.allocCache)
  7085  		result := span.freeindex + uint16(theBit)
  7086  		if result < span.nelems {
  7087  			freeidx := result + 1
  7088  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7089  				span.allocCache >>= uint(theBit + 1)
  7090  				span.freeindex = freeidx
  7091  				span.allocCount++
  7092  				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
  7095  			}
  7096  		}
  7097  	}
  7098  	v := nextFreeFastResult
  7099  	if v == 0 {
  7100  		v, span, checkGCTrigger = c.nextFree(spc)
  7101  	}
  7102  	x := unsafe.Pointer(v)
  7103  	if needzero && span.needzero != 0 {
  7104  		memclrNoHeapPointers(x, elemsize)
  7105  	}
  7106  
  7107  	publicationBarrier()
  7108  
  7109  	if writeBarrier.enabled {
  7110  
  7111  		gcmarknewobject(span, uintptr(x))
  7112  	} else {
  7113  
  7114  		span.freeIndexForScan = span.freeindex
  7115  	}
  7116  
  7117  	c.nextSample -= int64(elemsize)
  7118  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7119  		profilealloc(mp, x, elemsize)
  7120  	}
  7121  	mp.mallocing = 0
  7122  	releasem(mp)
  7123  
  7124  	if checkGCTrigger {
  7125  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7126  			gcStart(t)
  7127  		}
  7128  	}
  7129  	if valgrindenabled {
  7130  		valgrindMalloc(x, size)
  7131  	}
  7132  
  7133  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7134  		if assistG := getg().m.curg; assistG != nil {
  7135  			assistG.gcAssistBytes -= int64(elemsize - size)
  7136  		}
  7137  	}
  7138  
  7139  	if debug.malloc {
  7140  		postMallocgcDebug(x, elemsize, typ)
  7141  	}
  7142  	return x
  7143  }
  7144  
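        // mallocgcSmallNoScanSC10 is mallocgcSmallNoScanSC2 specialized for
        // size class 10 (128-byte elements).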
  7145  func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7146  	if doubleCheckMalloc {
  7147  		if gcphase == _GCmarktermination {
  7148  			throw("mallocgc called with gcphase == _GCmarktermination")
  7149  		}
  7150  	}
  7151  
  7152  	lockRankMayQueueFinalizer()
  7153  
  7154  	if debug.malloc {
  7155  		if x := preMallocgcDebug(size, typ); x != nil {
  7156  			return x
  7157  		}
  7158  	}
  7159  
  7160  	if gcBlackenEnabled != 0 {
  7161  		deductAssistCredit(size)
  7162  	}
  7163  
  7164  	const sizeclass = 10
  7165  
  7166  	const elemsize = 128
  7167  
  7168  	mp := acquirem()
  7169  	if doubleCheckMalloc {
  7170  		doubleCheckSmallNoScan(typ, mp)
  7171  	}
  7172  	mp.mallocing = 1
  7173  
  7174  	checkGCTrigger := false
  7175  	c := getMCache(mp)
  7176  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7177  	span := c.alloc[spc]
  7178  
  7179  	var nextFreeFastResult gclinkptr
  7180  	if span.allocCache != 0 {
  7181  		theBit := sys.TrailingZeros64(span.allocCache)
  7182  		result := span.freeindex + uint16(theBit)
  7183  		if result < span.nelems {
  7184  			freeidx := result + 1
  7185  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7186  				span.allocCache >>= uint(theBit + 1)
  7187  				span.freeindex = freeidx
  7188  				span.allocCount++
  7189  				nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base())
  7192  			}
  7193  		}
  7194  	}
  7195  	v := nextFreeFastResult
  7196  	if v == 0 {
  7197  		v, span, checkGCTrigger = c.nextFree(spc)
  7198  	}
  7199  	x := unsafe.Pointer(v)
  7200  	if needzero && span.needzero != 0 {
  7201  		memclrNoHeapPointers(x, elemsize)
  7202  	}
  7203  
  7204  	publicationBarrier()
  7205  
  7206  	if writeBarrier.enabled {
  7207  
  7208  		gcmarknewobject(span, uintptr(x))
  7209  	} else {
  7210  
  7211  		span.freeIndexForScan = span.freeindex
  7212  	}
  7213  
  7214  	c.nextSample -= int64(elemsize)
  7215  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7216  		profilealloc(mp, x, elemsize)
  7217  	}
  7218  	mp.mallocing = 0
  7219  	releasem(mp)
  7220  
  7221  	if checkGCTrigger {
  7222  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7223  			gcStart(t)
  7224  		}
  7225  	}
  7226  	if valgrindenabled {
  7227  		valgrindMalloc(x, size)
  7228  	}
  7229  
  7230  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7231  		if assistG := getg().m.curg; assistG != nil {
  7232  			assistG.gcAssistBytes -= int64(elemsize - size)
  7233  		}
  7234  	}
  7235  
  7236  	if debug.malloc {
  7237  		postMallocgcDebug(x, elemsize, typ)
  7238  	}
  7239  	return x
  7240  }
  7241  
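        // mallocgcSmallNoScanSC11 is mallocgcSmallNoScanSC2 specialized for
        // size class 11 (144-byte elements).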
  7242  func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7243  	if doubleCheckMalloc {
  7244  		if gcphase == _GCmarktermination {
  7245  			throw("mallocgc called with gcphase == _GCmarktermination")
  7246  		}
  7247  	}
  7248  
  7249  	lockRankMayQueueFinalizer()
  7250  
  7251  	if debug.malloc {
  7252  		if x := preMallocgcDebug(size, typ); x != nil {
  7253  			return x
  7254  		}
  7255  	}
  7256  
  7257  	if gcBlackenEnabled != 0 {
  7258  		deductAssistCredit(size)
  7259  	}
  7260  
  7261  	const sizeclass = 11
  7262  
  7263  	const elemsize = 144
  7264  
  7265  	mp := acquirem()
  7266  	if doubleCheckMalloc {
  7267  		doubleCheckSmallNoScan(typ, mp)
  7268  	}
  7269  	mp.mallocing = 1
  7270  
  7271  	checkGCTrigger := false
  7272  	c := getMCache(mp)
  7273  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7274  	span := c.alloc[spc]
  7275  
  7276  	var nextFreeFastResult gclinkptr
  7277  	if span.allocCache != 0 {
  7278  		theBit := sys.TrailingZeros64(span.allocCache)
  7279  		result := span.freeindex + uint16(theBit)
  7280  		if result < span.nelems {
  7281  			freeidx := result + 1
  7282  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7283  				span.allocCache >>= uint(theBit + 1)
  7284  				span.freeindex = freeidx
  7285  				span.allocCount++
  7286  				nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base())
  7289  			}
  7290  		}
  7291  	}
  7292  	v := nextFreeFastResult
  7293  	if v == 0 {
  7294  		v, span, checkGCTrigger = c.nextFree(spc)
  7295  	}
  7296  	x := unsafe.Pointer(v)
  7297  	if needzero && span.needzero != 0 {
  7298  		memclrNoHeapPointers(x, elemsize)
  7299  	}
  7300  
  7301  	publicationBarrier()
  7302  
  7303  	if writeBarrier.enabled {
  7304  
  7305  		gcmarknewobject(span, uintptr(x))
  7306  	} else {
  7307  
  7308  		span.freeIndexForScan = span.freeindex
  7309  	}
  7310  
  7311  	c.nextSample -= int64(elemsize)
  7312  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7313  		profilealloc(mp, x, elemsize)
  7314  	}
  7315  	mp.mallocing = 0
  7316  	releasem(mp)
  7317  
  7318  	if checkGCTrigger {
  7319  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7320  			gcStart(t)
  7321  		}
  7322  	}
  7323  	if valgrindenabled {
  7324  		valgrindMalloc(x, size)
  7325  	}
  7326  
  7327  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7328  		if assistG := getg().m.curg; assistG != nil {
  7329  			assistG.gcAssistBytes -= int64(elemsize - size)
  7330  		}
  7331  	}
  7332  
  7333  	if debug.malloc {
  7334  		postMallocgcDebug(x, elemsize, typ)
  7335  	}
  7336  	return x
  7337  }
  7338  
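        // mallocgcSmallNoScanSC12 is mallocgcSmallNoScanSC2 specialized for
        // size class 12 (160-byte elements).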
  7339  func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7340  	if doubleCheckMalloc {
  7341  		if gcphase == _GCmarktermination {
  7342  			throw("mallocgc called with gcphase == _GCmarktermination")
  7343  		}
  7344  	}
  7345  
  7346  	lockRankMayQueueFinalizer()
  7347  
  7348  	if debug.malloc {
  7349  		if x := preMallocgcDebug(size, typ); x != nil {
  7350  			return x
  7351  		}
  7352  	}
  7353  
  7354  	if gcBlackenEnabled != 0 {
  7355  		deductAssistCredit(size)
  7356  	}
  7357  
  7358  	const sizeclass = 12
  7359  
  7360  	const elemsize = 160
  7361  
  7362  	mp := acquirem()
  7363  	if doubleCheckMalloc {
  7364  		doubleCheckSmallNoScan(typ, mp)
  7365  	}
  7366  	mp.mallocing = 1
  7367  
  7368  	checkGCTrigger := false
  7369  	c := getMCache(mp)
  7370  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7371  	span := c.alloc[spc]
  7372  
  7373  	var nextFreeFastResult gclinkptr
  7374  	if span.allocCache != 0 {
  7375  		theBit := sys.TrailingZeros64(span.allocCache)
  7376  		result := span.freeindex + uint16(theBit)
  7377  		if result < span.nelems {
  7378  			freeidx := result + 1
  7379  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7380  				span.allocCache >>= uint(theBit + 1)
  7381  				span.freeindex = freeidx
  7382  				span.allocCount++
  7383  				nextFreeFastResult = gclinkptr(uintptr(result)*160 + span.base())
  7386  			}
  7387  		}
  7388  	}
  7389  	v := nextFreeFastResult
  7390  	if v == 0 {
  7391  		v, span, checkGCTrigger = c.nextFree(spc)
  7392  	}
  7393  	x := unsafe.Pointer(v)
  7394  	if needzero && span.needzero != 0 {
  7395  		memclrNoHeapPointers(x, elemsize)
  7396  	}
  7397  
  7398  	publicationBarrier()
  7399  
  7400  	if writeBarrier.enabled {
  7401  
  7402  		gcmarknewobject(span, uintptr(x))
  7403  	} else {
  7404  
  7405  		span.freeIndexForScan = span.freeindex
  7406  	}
  7407  
  7408  	c.nextSample -= int64(elemsize)
  7409  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7410  		profilealloc(mp, x, elemsize)
  7411  	}
  7412  	mp.mallocing = 0
  7413  	releasem(mp)
  7414  
  7415  	if checkGCTrigger {
  7416  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7417  			gcStart(t)
  7418  		}
  7419  	}
  7420  	if valgrindenabled {
  7421  		valgrindMalloc(x, size)
  7422  	}
  7423  
  7424  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7425  		if assistG := getg().m.curg; assistG != nil {
  7426  			assistG.gcAssistBytes -= int64(elemsize - size)
  7427  		}
  7428  	}
  7429  
  7430  	if debug.malloc {
  7431  		postMallocgcDebug(x, elemsize, typ)
  7432  	}
  7433  	return x
  7434  }
  7435  
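        // mallocgcSmallNoScanSC13 is mallocgcSmallNoScanSC2 specialized for
        // size class 13 (176-byte elements).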
  7436  func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7437  	if doubleCheckMalloc {
  7438  		if gcphase == _GCmarktermination {
  7439  			throw("mallocgc called with gcphase == _GCmarktermination")
  7440  		}
  7441  	}
  7442  
  7443  	lockRankMayQueueFinalizer()
  7444  
  7445  	if debug.malloc {
  7446  		if x := preMallocgcDebug(size, typ); x != nil {
  7447  			return x
  7448  		}
  7449  	}
  7450  
  7451  	if gcBlackenEnabled != 0 {
  7452  		deductAssistCredit(size)
  7453  	}
  7454  
  7455  	const sizeclass = 13
  7456  
  7457  	const elemsize = 176
  7458  
  7459  	mp := acquirem()
  7460  	if doubleCheckMalloc {
  7461  		doubleCheckSmallNoScan(typ, mp)
  7462  	}
  7463  	mp.mallocing = 1
  7464  
  7465  	checkGCTrigger := false
  7466  	c := getMCache(mp)
  7467  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7468  	span := c.alloc[spc]
  7469  
  7470  	var nextFreeFastResult gclinkptr
  7471  	if span.allocCache != 0 {
  7472  		theBit := sys.TrailingZeros64(span.allocCache)
  7473  		result := span.freeindex + uint16(theBit)
  7474  		if result < span.nelems {
  7475  			freeidx := result + 1
  7476  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7477  				span.allocCache >>= uint(theBit + 1)
  7478  				span.freeindex = freeidx
  7479  				span.allocCount++
  7480  				nextFreeFastResult = gclinkptr(uintptr(result)*176 + span.base())
  7483  			}
  7484  		}
  7485  	}
  7486  	v := nextFreeFastResult
  7487  	if v == 0 {
  7488  		v, span, checkGCTrigger = c.nextFree(spc)
  7489  	}
  7490  	x := unsafe.Pointer(v)
  7491  	if needzero && span.needzero != 0 {
  7492  		memclrNoHeapPointers(x, elemsize)
  7493  	}
  7494  
  7495  	publicationBarrier()
  7496  
  7497  	if writeBarrier.enabled {
  7498  
  7499  		gcmarknewobject(span, uintptr(x))
  7500  	} else {
  7501  
  7502  		span.freeIndexForScan = span.freeindex
  7503  	}
  7504  
  7505  	c.nextSample -= int64(elemsize)
  7506  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7507  		profilealloc(mp, x, elemsize)
  7508  	}
  7509  	mp.mallocing = 0
  7510  	releasem(mp)
  7511  
  7512  	if checkGCTrigger {
  7513  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7514  			gcStart(t)
  7515  		}
  7516  	}
  7517  	if valgrindenabled {
  7518  		valgrindMalloc(x, size)
  7519  	}
  7520  
  7521  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7522  		if assistG := getg().m.curg; assistG != nil {
  7523  			assistG.gcAssistBytes -= int64(elemsize - size)
  7524  		}
  7525  	}
  7526  
  7527  	if debug.malloc {
  7528  		postMallocgcDebug(x, elemsize, typ)
  7529  	}
  7530  	return x
  7531  }
  7532  
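        // mallocgcSmallNoScanSC14 is mallocgcSmallNoScanSC2 specialized for
        // size class 14 (192-byte elements).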
  7533  func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7534  	if doubleCheckMalloc {
  7535  		if gcphase == _GCmarktermination {
  7536  			throw("mallocgc called with gcphase == _GCmarktermination")
  7537  		}
  7538  	}
  7539  
  7540  	lockRankMayQueueFinalizer()
  7541  
  7542  	if debug.malloc {
  7543  		if x := preMallocgcDebug(size, typ); x != nil {
  7544  			return x
  7545  		}
  7546  	}
  7547  
  7548  	if gcBlackenEnabled != 0 {
  7549  		deductAssistCredit(size)
  7550  	}
  7551  
  7552  	const sizeclass = 14
  7553  
  7554  	const elemsize = 192
  7555  
  7556  	mp := acquirem()
  7557  	if doubleCheckMalloc {
  7558  		doubleCheckSmallNoScan(typ, mp)
  7559  	}
  7560  	mp.mallocing = 1
  7561  
  7562  	checkGCTrigger := false
  7563  	c := getMCache(mp)
  7564  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7565  	span := c.alloc[spc]
  7566  
  7567  	var nextFreeFastResult gclinkptr
  7568  	if span.allocCache != 0 {
  7569  		theBit := sys.TrailingZeros64(span.allocCache)
  7570  		result := span.freeindex + uint16(theBit)
  7571  		if result < span.nelems {
  7572  			freeidx := result + 1
  7573  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7574  				span.allocCache >>= uint(theBit + 1)
  7575  				span.freeindex = freeidx
  7576  				span.allocCount++
  7577  				nextFreeFastResult = gclinkptr(uintptr(result)*192 + span.base())
  7580  			}
  7581  		}
  7582  	}
  7583  	v := nextFreeFastResult
  7584  	if v == 0 {
  7585  		v, span, checkGCTrigger = c.nextFree(spc)
  7586  	}
  7587  	x := unsafe.Pointer(v)
  7588  	if needzero && span.needzero != 0 {
  7589  		memclrNoHeapPointers(x, elemsize)
  7590  	}
  7591  
  7592  	publicationBarrier()
  7593  
  7594  	if writeBarrier.enabled {
  7595  
  7596  		gcmarknewobject(span, uintptr(x))
  7597  	} else {
  7598  
  7599  		span.freeIndexForScan = span.freeindex
  7600  	}
  7601  
  7602  	c.nextSample -= int64(elemsize)
  7603  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7604  		profilealloc(mp, x, elemsize)
  7605  	}
  7606  	mp.mallocing = 0
  7607  	releasem(mp)
  7608  
  7609  	if checkGCTrigger {
  7610  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7611  			gcStart(t)
  7612  		}
  7613  	}
  7614  	if valgrindenabled {
  7615  		valgrindMalloc(x, size)
  7616  	}
  7617  
  7618  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7619  		if assistG := getg().m.curg; assistG != nil {
  7620  			assistG.gcAssistBytes -= int64(elemsize - size)
  7621  		}
  7622  	}
  7623  
  7624  	if debug.malloc {
  7625  		postMallocgcDebug(x, elemsize, typ)
  7626  	}
  7627  	return x
  7628  }
  7629  
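        // mallocgcSmallNoScanSC15 is mallocgcSmallNoScanSC2 specialized for
        // size class 15 (208-byte elements).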
  7630  func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7631  	if doubleCheckMalloc {
  7632  		if gcphase == _GCmarktermination {
  7633  			throw("mallocgc called with gcphase == _GCmarktermination")
  7634  		}
  7635  	}
  7636  
  7637  	lockRankMayQueueFinalizer()
  7638  
  7639  	if debug.malloc {
  7640  		if x := preMallocgcDebug(size, typ); x != nil {
  7641  			return x
  7642  		}
  7643  	}
  7644  
  7645  	if gcBlackenEnabled != 0 {
  7646  		deductAssistCredit(size)
  7647  	}
  7648  
  7649  	const sizeclass = 15
  7650  
  7651  	const elemsize = 208
  7652  
  7653  	mp := acquirem()
  7654  	if doubleCheckMalloc {
  7655  		doubleCheckSmallNoScan(typ, mp)
  7656  	}
  7657  	mp.mallocing = 1
  7658  
  7659  	checkGCTrigger := false
  7660  	c := getMCache(mp)
  7661  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7662  	span := c.alloc[spc]
  7663  
  7664  	var nextFreeFastResult gclinkptr
  7665  	if span.allocCache != 0 {
  7666  		theBit := sys.TrailingZeros64(span.allocCache)
  7667  		result := span.freeindex + uint16(theBit)
  7668  		if result < span.nelems {
  7669  			freeidx := result + 1
  7670  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7671  				span.allocCache >>= uint(theBit + 1)
  7672  				span.freeindex = freeidx
  7673  				span.allocCount++
  7674  				nextFreeFastResult = gclinkptr(uintptr(result)*
  7675  					208 +
  7676  					span.base())
  7677  			}
  7678  		}
  7679  	}
  7680  	v := nextFreeFastResult
  7681  	if v == 0 {
  7682  		v, span, checkGCTrigger = c.nextFree(spc)
  7683  	}
  7684  	x := unsafe.Pointer(v)
  7685  	if needzero && span.needzero != 0 {
  7686  		memclrNoHeapPointers(x, elemsize)
  7687  	}
  7688  
  7689  	publicationBarrier()
  7690  
  7691  	if writeBarrier.enabled {
  7692  		// Allocate black during GC so the current cycle treats the new object as live.
  7693  		gcmarknewobject(span, uintptr(x))
  7694  	} else {
  7695  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  7696  		span.freeIndexForScan = span.freeindex
  7697  	}
  7698  
  7699  	c.nextSample -= int64(elemsize)
  7700  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7701  		profilealloc(mp, x, elemsize)
  7702  	}
  7703  	mp.mallocing = 0
  7704  	releasem(mp)
  7705  
  7706  	if checkGCTrigger {
  7707  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7708  			gcStart(t)
  7709  		}
  7710  	}
  7711  	if valgrindenabled {
  7712  		valgrindMalloc(x, size)
  7713  	}
  7714  
  7715  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7716  		if assistG := getg().m.curg; assistG != nil {
  7717  			assistG.gcAssistBytes -= int64(elemsize - size)
  7718  		}
  7719  	}
  7720  
  7721  	if debug.malloc {
  7722  		postMallocgcDebug(x, elemsize, typ)
  7723  	}
  7724  	return x
  7725  }
  7726  
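        // GC assist accounting happens in two steps: deductAssistCredit
        // charges the goroutine for the requested size up front, and the
        // gcAssistBytes adjustment at the end of each function charges the
        // remaining elemsize - size bytes of internal fragmentation from
        // rounding up to the size class. For example, a 193-byte noscan
        // allocation rounds up to size class 15 (elemsize 208): 193 bytes are
        // deducted up front and 15 more afterwards, so the assist debt
        // reflects the full 208 bytes the heap grew by.
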
  7727  func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7728  	if doubleCheckMalloc {
  7729  		if gcphase == _GCmarktermination {
  7730  			throw("mallocgc called with gcphase == _GCmarktermination")
  7731  		}
  7732  	}
  7733  
  7734  	lockRankMayQueueFinalizer()
  7735  
  7736  	if debug.malloc {
  7737  		if x := preMallocgcDebug(size, typ); x != nil {
  7738  			return x
  7739  		}
  7740  	}
  7741  
  7742  	if gcBlackenEnabled != 0 {
  7743  		deductAssistCredit(size)
  7744  	}
  7745  
  7746  	const sizeclass = 16
  7747  
  7748  	const elemsize = 224
  7749  
  7750  	mp := acquirem()
  7751  	if doubleCheckMalloc {
  7752  		doubleCheckSmallNoScan(typ, mp)
  7753  	}
  7754  	mp.mallocing = 1
  7755  
  7756  	checkGCTrigger := false
  7757  	c := getMCache(mp)
  7758  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7759  	span := c.alloc[spc]
  7760  
  7761  	var nextFreeFastResult gclinkptr
  7762  	if span.allocCache != 0 {
  7763  		theBit := sys.TrailingZeros64(span.allocCache)
  7764  		result := span.freeindex + uint16(theBit)
  7765  		if result < span.nelems {
  7766  			freeidx := result + 1
  7767  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7768  				span.allocCache >>= uint(theBit + 1)
  7769  				span.freeindex = freeidx
  7770  				span.allocCount++
  7771  				nextFreeFastResult = gclinkptr(uintptr(result)*
  7772  					224 +
  7773  					span.base())
  7774  			}
  7775  		}
  7776  	}
  7777  	v := nextFreeFastResult
  7778  	if v == 0 {
  7779  		v, span, checkGCTrigger = c.nextFree(spc)
  7780  	}
  7781  	x := unsafe.Pointer(v)
  7782  	if needzero && span.needzero != 0 {
  7783  		memclrNoHeapPointers(x, elemsize)
  7784  	}
  7785  
  7786  	publicationBarrier()
  7787  
  7788  	if writeBarrier.enabled {
  7789  		// Allocate black during GC so the current cycle treats the new object as live.
  7790  		gcmarknewobject(span, uintptr(x))
  7791  	} else {
  7792  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  7793  		span.freeIndexForScan = span.freeindex
  7794  	}
  7795  
  7796  	c.nextSample -= int64(elemsize)
  7797  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7798  		profilealloc(mp, x, elemsize)
  7799  	}
  7800  	mp.mallocing = 0
  7801  	releasem(mp)
  7802  
  7803  	if checkGCTrigger {
  7804  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7805  			gcStart(t)
  7806  		}
  7807  	}
  7808  	if valgrindenabled {
  7809  		valgrindMalloc(x, size)
  7810  	}
  7811  
  7812  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7813  		if assistG := getg().m.curg; assistG != nil {
  7814  			assistG.gcAssistBytes -= int64(elemsize - size)
  7815  		}
  7816  	}
  7817  
  7818  	if debug.malloc {
  7819  		postMallocgcDebug(x, elemsize, typ)
  7820  	}
  7821  	return x
  7822  }
  7823  
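        // Heap-profile sampling: c.nextSample is a byte countdown to the next
        // sampled allocation. Each allocation subtracts elemsize, and once the
        // countdown goes negative (or MemProfileRate changed since the mcache
        // cached it in c.memProfRate), profilealloc records a sample and
        // resets the countdown. At the default MemProfileRate of 512 KiB this
        // samples roughly one allocation per 512 KiB allocated instead of
        // every call.
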
  7824  func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7825  	if doubleCheckMalloc {
  7826  		if gcphase == _GCmarktermination {
  7827  			throw("mallocgc called with gcphase == _GCmarktermination")
  7828  		}
  7829  	}
  7830  
  7831  	lockRankMayQueueFinalizer()
  7832  
  7833  	if debug.malloc {
  7834  		if x := preMallocgcDebug(size, typ); x != nil {
  7835  			return x
  7836  		}
  7837  	}
  7838  
  7839  	if gcBlackenEnabled != 0 {
  7840  		deductAssistCredit(size)
  7841  	}
  7842  
  7843  	const sizeclass = 17
  7844  
  7845  	const elemsize = 240
  7846  
  7847  	mp := acquirem()
  7848  	if doubleCheckMalloc {
  7849  		doubleCheckSmallNoScan(typ, mp)
  7850  	}
  7851  	mp.mallocing = 1
  7852  
  7853  	checkGCTrigger := false
  7854  	c := getMCache(mp)
  7855  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7856  	span := c.alloc[spc]
  7857  
  7858  	var nextFreeFastResult gclinkptr
  7859  	if span.allocCache != 0 {
  7860  		theBit := sys.TrailingZeros64(span.allocCache)
  7861  		result := span.freeindex + uint16(theBit)
  7862  		if result < span.nelems {
  7863  			freeidx := result + 1
  7864  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7865  				span.allocCache >>= uint(theBit + 1)
  7866  				span.freeindex = freeidx
  7867  				span.allocCount++
  7868  				nextFreeFastResult = gclinkptr(uintptr(result)*
  7869  					240 +
  7870  					span.base())
  7871  			}
  7872  		}
  7873  	}
  7874  	v := nextFreeFastResult
  7875  	if v == 0 {
  7876  		v, span, checkGCTrigger = c.nextFree(spc)
  7877  	}
  7878  	x := unsafe.Pointer(v)
  7879  	if needzero && span.needzero != 0 {
  7880  		memclrNoHeapPointers(x, elemsize)
  7881  	}
  7882  
  7883  	publicationBarrier()
  7884  
  7885  	if writeBarrier.enabled {
  7886  		// Allocate black during GC so the current cycle treats the new object as live.
  7887  		gcmarknewobject(span, uintptr(x))
  7888  	} else {
  7889  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  7890  		span.freeIndexForScan = span.freeindex
  7891  	}
  7892  
  7893  	c.nextSample -= int64(elemsize)
  7894  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7895  		profilealloc(mp, x, elemsize)
  7896  	}
  7897  	mp.mallocing = 0
  7898  	releasem(mp)
  7899  
  7900  	if checkGCTrigger {
  7901  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7902  			gcStart(t)
  7903  		}
  7904  	}
  7905  	if valgrindenabled {
  7906  		valgrindMalloc(x, size)
  7907  	}
  7908  
  7909  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7910  		if assistG := getg().m.curg; assistG != nil {
  7911  			assistG.gcAssistBytes -= int64(elemsize - size)
  7912  		}
  7913  	}
  7914  
  7915  	if debug.malloc {
  7916  		postMallocgcDebug(x, elemsize, typ)
  7917  	}
  7918  	return x
  7919  }
  7920  
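        // publicationBarrier orders the initializing stores above (zeroing and
        // span bookkeeping) before any store that publishes the new pointer.
        // Without it, on a weakly ordered machine, the garbage collector or
        // another goroutine could follow a published pointer and observe
        // uninitialized memory; on TSO architectures such as x86 the barrier
        // only needs to prevent compiler reordering.
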
  7921  func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7922  	if doubleCheckMalloc {
  7923  		if gcphase == _GCmarktermination {
  7924  			throw("mallocgc called with gcphase == _GCmarktermination")
  7925  		}
  7926  	}
  7927  
  7928  	lockRankMayQueueFinalizer()
  7929  
  7930  	if debug.malloc {
  7931  		if x := preMallocgcDebug(size, typ); x != nil {
  7932  			return x
  7933  		}
  7934  	}
  7935  
  7936  	if gcBlackenEnabled != 0 {
  7937  		deductAssistCredit(size)
  7938  	}
  7939  
  7940  	const sizeclass = 18
  7941  
  7942  	const elemsize = 256
  7943  
  7944  	mp := acquirem()
  7945  	if doubleCheckMalloc {
  7946  		doubleCheckSmallNoScan(typ, mp)
  7947  	}
  7948  	mp.mallocing = 1
  7949  
  7950  	checkGCTrigger := false
  7951  	c := getMCache(mp)
  7952  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7953  	span := c.alloc[spc]
  7954  
  7955  	var nextFreeFastResult gclinkptr
  7956  	if span.allocCache != 0 {
  7957  		theBit := sys.TrailingZeros64(span.allocCache)
  7958  		result := span.freeindex + uint16(theBit)
  7959  		if result < span.nelems {
  7960  			freeidx := result + 1
  7961  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7962  				span.allocCache >>= uint(theBit + 1)
  7963  				span.freeindex = freeidx
  7964  				span.allocCount++
  7965  				nextFreeFastResult = gclinkptr(uintptr(result)*
  7966  					256 +
  7967  					span.base())
  7968  			}
  7969  		}
  7970  	}
  7971  	v := nextFreeFastResult
  7972  	if v == 0 {
  7973  		v, span, checkGCTrigger = c.nextFree(spc)
  7974  	}
  7975  	x := unsafe.Pointer(v)
  7976  	if needzero && span.needzero != 0 {
  7977  		memclrNoHeapPointers(x, elemsize)
  7978  	}
  7979  
  7980  	publicationBarrier()
  7981  
  7982  	if writeBarrier.enabled {
  7983  		// Allocate black during GC so the current cycle treats the new object as live.
  7984  		gcmarknewobject(span, uintptr(x))
  7985  	} else {
  7986  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  7987  		span.freeIndexForScan = span.freeindex
  7988  	}
  7989  
  7990  	c.nextSample -= int64(elemsize)
  7991  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7992  		profilealloc(mp, x, elemsize)
  7993  	}
  7994  	mp.mallocing = 0
  7995  	releasem(mp)
  7996  
  7997  	if checkGCTrigger {
  7998  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7999  			gcStart(t)
  8000  		}
  8001  	}
  8002  	if valgrindenabled {
  8003  		valgrindMalloc(x, size)
  8004  	}
  8005  
  8006  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8007  		if assistG := getg().m.curg; assistG != nil {
  8008  			assistG.gcAssistBytes -= int64(elemsize - size)
  8009  		}
  8010  	}
  8011  
  8012  	if debug.malloc {
  8013  		postMallocgcDebug(x, elemsize, typ)
  8014  	}
  8015  	return x
  8016  }
  8017  
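        // Zeroing is skipped when it would be redundant: memclrNoHeapPointers
        // runs only if the caller asked for zeroed memory (needzero) and the
        // span cannot guarantee its slots are already zero (span.needzero != 0).
        // Freshly mapped spans arrive zeroed from the OS, so the common case
        // avoids the clear entirely.
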
  8018  func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8019  	if doubleCheckMalloc {
  8020  		if gcphase == _GCmarktermination {
  8021  			throw("mallocgc called with gcphase == _GCmarktermination")
  8022  		}
  8023  	}
  8024  
  8025  	lockRankMayQueueFinalizer()
  8026  
  8027  	if debug.malloc {
  8028  		if x := preMallocgcDebug(size, typ); x != nil {
  8029  			return x
  8030  		}
  8031  	}
  8032  
  8033  	if gcBlackenEnabled != 0 {
  8034  		deductAssistCredit(size)
  8035  	}
  8036  
  8037  	const sizeclass = 19
  8038  
  8039  	const elemsize = 288
  8040  
  8041  	mp := acquirem()
  8042  	if doubleCheckMalloc {
  8043  		doubleCheckSmallNoScan(typ, mp)
  8044  	}
  8045  	mp.mallocing = 1
  8046  
  8047  	checkGCTrigger := false
  8048  	c := getMCache(mp)
  8049  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8050  	span := c.alloc[spc]
  8051  
  8052  	var nextFreeFastResult gclinkptr
  8053  	if span.allocCache != 0 {
  8054  		theBit := sys.TrailingZeros64(span.allocCache)
  8055  		result := span.freeindex + uint16(theBit)
  8056  		if result < span.nelems {
  8057  			freeidx := result + 1
  8058  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8059  				span.allocCache >>= uint(theBit + 1)
  8060  				span.freeindex = freeidx
  8061  				span.allocCount++
  8062  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8063  					288 +
  8064  					span.base())
  8065  			}
  8066  		}
  8067  	}
  8068  	v := nextFreeFastResult
  8069  	if v == 0 {
  8070  		v, span, checkGCTrigger = c.nextFree(spc)
  8071  	}
  8072  	x := unsafe.Pointer(v)
  8073  	if needzero && span.needzero != 0 {
  8074  		memclrNoHeapPointers(x, elemsize)
  8075  	}
  8076  
  8077  	publicationBarrier()
  8078  
  8079  	if writeBarrier.enabled {
  8080  		// Allocate black during GC so the current cycle treats the new object as live.
  8081  		gcmarknewobject(span, uintptr(x))
  8082  	} else {
  8083  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8084  		span.freeIndexForScan = span.freeindex
  8085  	}
  8086  
  8087  	c.nextSample -= int64(elemsize)
  8088  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8089  		profilealloc(mp, x, elemsize)
  8090  	}
  8091  	mp.mallocing = 0
  8092  	releasem(mp)
  8093  
  8094  	if checkGCTrigger {
  8095  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8096  			gcStart(t)
  8097  		}
  8098  	}
  8099  	if valgrindenabled {
  8100  		valgrindMalloc(x, size)
  8101  	}
  8102  
  8103  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8104  		if assistG := getg().m.curg; assistG != nil {
  8105  			assistG.gcAssistBytes -= int64(elemsize - size)
  8106  		}
  8107  	}
  8108  
  8109  	if debug.malloc {
  8110  		postMallocgcDebug(x, elemsize, typ)
  8111  	}
  8112  	return x
  8113  }
  8114  
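        // checkGCTrigger becomes true only when the slow path (c.nextFree) had
        // to refill the mcache with a fresh span, which is the moment the heap
        // may have grown. Testing gcTrigger{kind: gcTriggerHeap} then starts a
        // GC cycle if the heap has passed the pacer's trigger point.
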
  8115  func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8116  	if doubleCheckMalloc {
  8117  		if gcphase == _GCmarktermination {
  8118  			throw("mallocgc called with gcphase == _GCmarktermination")
  8119  		}
  8120  	}
  8121  
  8122  	lockRankMayQueueFinalizer()
  8123  
  8124  	if debug.malloc {
  8125  		if x := preMallocgcDebug(size, typ); x != nil {
  8126  			return x
  8127  		}
  8128  	}
  8129  
  8130  	if gcBlackenEnabled != 0 {
  8131  		deductAssistCredit(size)
  8132  	}
  8133  
  8134  	const sizeclass = 20
  8135  
  8136  	const elemsize = 320
  8137  
  8138  	mp := acquirem()
  8139  	if doubleCheckMalloc {
  8140  		doubleCheckSmallNoScan(typ, mp)
  8141  	}
  8142  	mp.mallocing = 1
  8143  
  8144  	checkGCTrigger := false
  8145  	c := getMCache(mp)
  8146  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8147  	span := c.alloc[spc]
  8148  
  8149  	var nextFreeFastResult gclinkptr
  8150  	if span.allocCache != 0 {
  8151  		theBit := sys.TrailingZeros64(span.allocCache)
  8152  		result := span.freeindex + uint16(theBit)
  8153  		if result < span.nelems {
  8154  			freeidx := result + 1
  8155  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8156  				span.allocCache >>= uint(theBit + 1)
  8157  				span.freeindex = freeidx
  8158  				span.allocCount++
  8159  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8160  					320 +
  8161  					span.base())
  8162  			}
  8163  		}
  8164  	}
  8165  	v := nextFreeFastResult
  8166  	if v == 0 {
  8167  		v, span, checkGCTrigger = c.nextFree(spc)
  8168  	}
  8169  	x := unsafe.Pointer(v)
  8170  	if needzero && span.needzero != 0 {
  8171  		memclrNoHeapPointers(x, elemsize)
  8172  	}
  8173  
  8174  	publicationBarrier()
  8175  
  8176  	if writeBarrier.enabled {
  8177  		// Allocate black during GC so the current cycle treats the new object as live.
  8178  		gcmarknewobject(span, uintptr(x))
  8179  	} else {
  8180  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8181  		span.freeIndexForScan = span.freeindex
  8182  	}
  8183  
  8184  	c.nextSample -= int64(elemsize)
  8185  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8186  		profilealloc(mp, x, elemsize)
  8187  	}
  8188  	mp.mallocing = 0
  8189  	releasem(mp)
  8190  
  8191  	if checkGCTrigger {
  8192  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8193  			gcStart(t)
  8194  		}
  8195  	}
  8196  	if valgrindenabled {
  8197  		valgrindMalloc(x, size)
  8198  	}
  8199  
  8200  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8201  		if assistG := getg().m.curg; assistG != nil {
  8202  			assistG.gcAssistBytes -= int64(elemsize - size)
  8203  		}
  8204  	}
  8205  
  8206  	if debug.malloc {
  8207  		postMallocgcDebug(x, elemsize, typ)
  8208  	}
  8209  	return x
  8210  }
  8211  
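        // valgrindMalloc is part of the runtime's optional Valgrind
        // instrumentation. Note that it registers the requested size rather
        // than the rounded-up elemsize, so tools report what the caller asked
        // for.
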
  8212  func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8213  	if doubleCheckMalloc {
  8214  		if gcphase == _GCmarktermination {
  8215  			throw("mallocgc called with gcphase == _GCmarktermination")
  8216  		}
  8217  	}
  8218  
  8219  	lockRankMayQueueFinalizer()
  8220  
  8221  	if debug.malloc {
  8222  		if x := preMallocgcDebug(size, typ); x != nil {
  8223  			return x
  8224  		}
  8225  	}
  8226  
  8227  	if gcBlackenEnabled != 0 {
  8228  		deductAssistCredit(size)
  8229  	}
  8230  
  8231  	const sizeclass = 21
  8232  
  8233  	const elemsize = 352
  8234  
  8235  	mp := acquirem()
  8236  	if doubleCheckMalloc {
  8237  		doubleCheckSmallNoScan(typ, mp)
  8238  	}
  8239  	mp.mallocing = 1
  8240  
  8241  	checkGCTrigger := false
  8242  	c := getMCache(mp)
  8243  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8244  	span := c.alloc[spc]
  8245  
  8246  	var nextFreeFastResult gclinkptr
  8247  	if span.allocCache != 0 {
  8248  		theBit := sys.TrailingZeros64(span.allocCache)
  8249  		result := span.freeindex + uint16(theBit)
  8250  		if result < span.nelems {
  8251  			freeidx := result + 1
  8252  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8253  				span.allocCache >>= uint(theBit + 1)
  8254  				span.freeindex = freeidx
  8255  				span.allocCount++
  8256  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8257  					352 +
  8258  					span.base())
  8259  			}
  8260  		}
  8261  	}
  8262  	v := nextFreeFastResult
  8263  	if v == 0 {
  8264  		v, span, checkGCTrigger = c.nextFree(spc)
  8265  	}
  8266  	x := unsafe.Pointer(v)
  8267  	if needzero && span.needzero != 0 {
  8268  		memclrNoHeapPointers(x, elemsize)
  8269  	}
  8270  
  8271  	publicationBarrier()
  8272  
  8273  	if writeBarrier.enabled {
  8274  		// Allocate black during GC so the current cycle treats the new object as live.
  8275  		gcmarknewobject(span, uintptr(x))
  8276  	} else {
  8277  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8278  		span.freeIndexForScan = span.freeindex
  8279  	}
  8280  
  8281  	c.nextSample -= int64(elemsize)
  8282  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8283  		profilealloc(mp, x, elemsize)
  8284  	}
  8285  	mp.mallocing = 0
  8286  	releasem(mp)
  8287  
  8288  	if checkGCTrigger {
  8289  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8290  			gcStart(t)
  8291  		}
  8292  	}
  8293  	if valgrindenabled {
  8294  		valgrindMalloc(x, size)
  8295  	}
  8296  
  8297  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8298  		if assistG := getg().m.curg; assistG != nil {
  8299  			assistG.gcAssistBytes -= int64(elemsize - size)
  8300  		}
  8301  	}
  8302  
  8303  	if debug.malloc {
  8304  		postMallocgcDebug(x, elemsize, typ)
  8305  	}
  8306  	return x
  8307  }
  8308  
  8309  func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8310  	if doubleCheckMalloc {
  8311  		if gcphase == _GCmarktermination {
  8312  			throw("mallocgc called with gcphase == _GCmarktermination")
  8313  		}
  8314  	}
  8315  
  8316  	lockRankMayQueueFinalizer()
  8317  
  8318  	if debug.malloc {
  8319  		if x := preMallocgcDebug(size, typ); x != nil {
  8320  			return x
  8321  		}
  8322  	}
  8323  
  8324  	if gcBlackenEnabled != 0 {
  8325  		deductAssistCredit(size)
  8326  	}
  8327  
  8328  	const sizeclass = 22
  8329  
  8330  	const elemsize = 384
  8331  
  8332  	mp := acquirem()
  8333  	if doubleCheckMalloc {
  8334  		doubleCheckSmallNoScan(typ, mp)
  8335  	}
  8336  	mp.mallocing = 1
  8337  
  8338  	checkGCTrigger := false
  8339  	c := getMCache(mp)
  8340  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8341  	span := c.alloc[spc]
  8342  
  8343  	var nextFreeFastResult gclinkptr
  8344  	if span.allocCache != 0 {
  8345  		theBit := sys.TrailingZeros64(span.allocCache)
  8346  		result := span.freeindex + uint16(theBit)
  8347  		if result < span.nelems {
  8348  			freeidx := result + 1
  8349  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8350  				span.allocCache >>= uint(theBit + 1)
  8351  				span.freeindex = freeidx
  8352  				span.allocCount++
  8353  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8354  					384 +
  8355  					span.base())
  8356  			}
  8357  		}
  8358  	}
  8359  	v := nextFreeFastResult
  8360  	if v == 0 {
  8361  		v, span, checkGCTrigger = c.nextFree(spc)
  8362  	}
  8363  	x := unsafe.Pointer(v)
  8364  	if needzero && span.needzero != 0 {
  8365  		memclrNoHeapPointers(x, elemsize)
  8366  	}
  8367  
  8368  	publicationBarrier()
  8369  
  8370  	if writeBarrier.enabled {
  8371  		// Allocate black during GC so the current cycle treats the new object as live.
  8372  		gcmarknewobject(span, uintptr(x))
  8373  	} else {
  8374  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8375  		span.freeIndexForScan = span.freeindex
  8376  	}
  8377  
  8378  	c.nextSample -= int64(elemsize)
  8379  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8380  		profilealloc(mp, x, elemsize)
  8381  	}
  8382  	mp.mallocing = 0
  8383  	releasem(mp)
  8384  
  8385  	if checkGCTrigger {
  8386  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8387  			gcStart(t)
  8388  		}
  8389  	}
  8390  	if valgrindenabled {
  8391  		valgrindMalloc(x, size)
  8392  	}
  8393  
  8394  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8395  		if assistG := getg().m.curg; assistG != nil {
  8396  			assistG.gcAssistBytes -= int64(elemsize - size)
  8397  		}
  8398  	}
  8399  
  8400  	if debug.malloc {
  8401  		postMallocgcDebug(x, elemsize, typ)
  8402  	}
  8403  	return x
  8404  }
  8405  
  8406  func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8407  	if doubleCheckMalloc {
  8408  		if gcphase == _GCmarktermination {
  8409  			throw("mallocgc called with gcphase == _GCmarktermination")
  8410  		}
  8411  	}
  8412  
  8413  	lockRankMayQueueFinalizer()
  8414  
  8415  	if debug.malloc {
  8416  		if x := preMallocgcDebug(size, typ); x != nil {
  8417  			return x
  8418  		}
  8419  	}
  8420  
  8421  	if gcBlackenEnabled != 0 {
  8422  		deductAssistCredit(size)
  8423  	}
  8424  
  8425  	const sizeclass = 23
  8426  
  8427  	const elemsize = 416
  8428  
  8429  	mp := acquirem()
  8430  	if doubleCheckMalloc {
  8431  		doubleCheckSmallNoScan(typ, mp)
  8432  	}
  8433  	mp.mallocing = 1
  8434  
  8435  	checkGCTrigger := false
  8436  	c := getMCache(mp)
  8437  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8438  	span := c.alloc[spc]
  8439  
  8440  	var nextFreeFastResult gclinkptr
  8441  	if span.allocCache != 0 {
  8442  		theBit := sys.TrailingZeros64(span.allocCache)
  8443  		result := span.freeindex + uint16(theBit)
  8444  		if result < span.nelems {
  8445  			freeidx := result + 1
  8446  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8447  				span.allocCache >>= uint(theBit + 1)
  8448  				span.freeindex = freeidx
  8449  				span.allocCount++
  8450  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8451  					416 +
  8452  					span.base())
  8453  			}
  8454  		}
  8455  	}
  8456  	v := nextFreeFastResult
  8457  	if v == 0 {
  8458  		v, span, checkGCTrigger = c.nextFree(spc)
  8459  	}
  8460  	x := unsafe.Pointer(v)
  8461  	if needzero && span.needzero != 0 {
  8462  		memclrNoHeapPointers(x, elemsize)
  8463  	}
  8464  
  8465  	publicationBarrier()
  8466  
  8467  	if writeBarrier.enabled {
  8468  		// Allocate black during GC so the current cycle treats the new object as live.
  8469  		gcmarknewobject(span, uintptr(x))
  8470  	} else {
  8471  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8472  		span.freeIndexForScan = span.freeindex
  8473  	}
  8474  
  8475  	c.nextSample -= int64(elemsize)
  8476  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8477  		profilealloc(mp, x, elemsize)
  8478  	}
  8479  	mp.mallocing = 0
  8480  	releasem(mp)
  8481  
  8482  	if checkGCTrigger {
  8483  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8484  			gcStart(t)
  8485  		}
  8486  	}
  8487  	if valgrindenabled {
  8488  		valgrindMalloc(x, size)
  8489  	}
  8490  
  8491  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8492  		if assistG := getg().m.curg; assistG != nil {
  8493  			assistG.gcAssistBytes -= int64(elemsize - size)
  8494  		}
  8495  	}
  8496  
  8497  	if debug.malloc {
  8498  		postMallocgcDebug(x, elemsize, typ)
  8499  	}
  8500  	return x
  8501  }
  8502  
  8503  func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8504  	if doubleCheckMalloc {
  8505  		if gcphase == _GCmarktermination {
  8506  			throw("mallocgc called with gcphase == _GCmarktermination")
  8507  		}
  8508  	}
  8509  
  8510  	lockRankMayQueueFinalizer()
  8511  
  8512  	if debug.malloc {
  8513  		if x := preMallocgcDebug(size, typ); x != nil {
  8514  			return x
  8515  		}
  8516  	}
  8517  
  8518  	if gcBlackenEnabled != 0 {
  8519  		deductAssistCredit(size)
  8520  	}
  8521  
  8522  	const sizeclass = 24
  8523  
  8524  	const elemsize = 448
  8525  
  8526  	mp := acquirem()
  8527  	if doubleCheckMalloc {
  8528  		doubleCheckSmallNoScan(typ, mp)
  8529  	}
  8530  	mp.mallocing = 1
  8531  
  8532  	checkGCTrigger := false
  8533  	c := getMCache(mp)
  8534  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8535  	span := c.alloc[spc]
  8536  
  8537  	var nextFreeFastResult gclinkptr
  8538  	if span.allocCache != 0 {
  8539  		theBit := sys.TrailingZeros64(span.allocCache)
  8540  		result := span.freeindex + uint16(theBit)
  8541  		if result < span.nelems {
  8542  			freeidx := result + 1
  8543  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8544  				span.allocCache >>= uint(theBit + 1)
  8545  				span.freeindex = freeidx
  8546  				span.allocCount++
  8547  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8548  					448 +
  8549  					span.base())
  8550  			}
  8551  		}
  8552  	}
  8553  	v := nextFreeFastResult
  8554  	if v == 0 {
  8555  		v, span, checkGCTrigger = c.nextFree(spc)
  8556  	}
  8557  	x := unsafe.Pointer(v)
  8558  	if needzero && span.needzero != 0 {
  8559  		memclrNoHeapPointers(x, elemsize)
  8560  	}
  8561  
  8562  	publicationBarrier()
  8563  
  8564  	if writeBarrier.enabled {
  8565  		// Allocate black during GC so the current cycle treats the new object as live.
  8566  		gcmarknewobject(span, uintptr(x))
  8567  	} else {
  8568  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8569  		span.freeIndexForScan = span.freeindex
  8570  	}
  8571  
  8572  	c.nextSample -= int64(elemsize)
  8573  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8574  		profilealloc(mp, x, elemsize)
  8575  	}
  8576  	mp.mallocing = 0
  8577  	releasem(mp)
  8578  
  8579  	if checkGCTrigger {
  8580  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8581  			gcStart(t)
  8582  		}
  8583  	}
  8584  	if valgrindenabled {
  8585  		valgrindMalloc(x, size)
  8586  	}
  8587  
  8588  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8589  		if assistG := getg().m.curg; assistG != nil {
  8590  			assistG.gcAssistBytes -= int64(elemsize - size)
  8591  		}
  8592  	}
  8593  
  8594  	if debug.malloc {
  8595  		postMallocgcDebug(x, elemsize, typ)
  8596  	}
  8597  	return x
  8598  }
  8599  
  8600  func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8601  	if doubleCheckMalloc {
  8602  		if gcphase == _GCmarktermination {
  8603  			throw("mallocgc called with gcphase == _GCmarktermination")
  8604  		}
  8605  	}
  8606  
  8607  	lockRankMayQueueFinalizer()
  8608  
  8609  	if debug.malloc {
  8610  		if x := preMallocgcDebug(size, typ); x != nil {
  8611  			return x
  8612  		}
  8613  	}
  8614  
  8615  	if gcBlackenEnabled != 0 {
  8616  		deductAssistCredit(size)
  8617  	}
  8618  
  8619  	const sizeclass = 25
  8620  
  8621  	const elemsize = 480
  8622  
  8623  	mp := acquirem()
  8624  	if doubleCheckMalloc {
  8625  		doubleCheckSmallNoScan(typ, mp)
  8626  	}
  8627  	mp.mallocing = 1
  8628  
  8629  	checkGCTrigger := false
  8630  	c := getMCache(mp)
  8631  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8632  	span := c.alloc[spc]
  8633  
  8634  	var nextFreeFastResult gclinkptr
  8635  	if span.allocCache != 0 {
  8636  		theBit := sys.TrailingZeros64(span.allocCache)
  8637  		result := span.freeindex + uint16(theBit)
  8638  		if result < span.nelems {
  8639  			freeidx := result + 1
  8640  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8641  				span.allocCache >>= uint(theBit + 1)
  8642  				span.freeindex = freeidx
  8643  				span.allocCount++
  8644  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8645  					480 +
  8646  					span.base())
  8647  			}
  8648  		}
  8649  	}
  8650  	v := nextFreeFastResult
  8651  	if v == 0 {
  8652  		v, span, checkGCTrigger = c.nextFree(spc)
  8653  	}
  8654  	x := unsafe.Pointer(v)
  8655  	if needzero && span.needzero != 0 {
  8656  		memclrNoHeapPointers(x, elemsize)
  8657  	}
  8658  
  8659  	publicationBarrier()
  8660  
  8661  	if writeBarrier.enabled {
  8662  		// Allocate black during GC so the current cycle treats the new object as live.
  8663  		gcmarknewobject(span, uintptr(x))
  8664  	} else {
  8665  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8666  		span.freeIndexForScan = span.freeindex
  8667  	}
  8668  
  8669  	c.nextSample -= int64(elemsize)
  8670  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8671  		profilealloc(mp, x, elemsize)
  8672  	}
  8673  	mp.mallocing = 0
  8674  	releasem(mp)
  8675  
  8676  	if checkGCTrigger {
  8677  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8678  			gcStart(t)
  8679  		}
  8680  	}
  8681  	if valgrindenabled {
  8682  		valgrindMalloc(x, size)
  8683  	}
  8684  
  8685  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8686  		if assistG := getg().m.curg; assistG != nil {
  8687  			assistG.gcAssistBytes -= int64(elemsize - size)
  8688  		}
  8689  	}
  8690  
  8691  	if debug.malloc {
  8692  		postMallocgcDebug(x, elemsize, typ)
  8693  	}
  8694  	return x
  8695  }
  8696  
  8697  func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8698  	if doubleCheckMalloc {
  8699  		if gcphase == _GCmarktermination {
  8700  			throw("mallocgc called with gcphase == _GCmarktermination")
  8701  		}
  8702  	}
  8703  
  8704  	lockRankMayQueueFinalizer()
  8705  
  8706  	if debug.malloc {
  8707  		if x := preMallocgcDebug(size, typ); x != nil {
  8708  			return x
  8709  		}
  8710  	}
  8711  
  8712  	if gcBlackenEnabled != 0 {
  8713  		deductAssistCredit(size)
  8714  	}
  8715  
  8716  	const sizeclass = 26
  8717  
  8718  	const elemsize = 512
  8719  
  8720  	mp := acquirem()
  8721  	if doubleCheckMalloc {
  8722  		doubleCheckSmallNoScan(typ, mp)
  8723  	}
  8724  	mp.mallocing = 1
  8725  
  8726  	checkGCTrigger := false
  8727  	c := getMCache(mp)
  8728  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8729  	span := c.alloc[spc]
  8730  
  8731  	var nextFreeFastResult gclinkptr
  8732  	if span.allocCache != 0 {
  8733  		theBit := sys.TrailingZeros64(span.allocCache)
  8734  		result := span.freeindex + uint16(theBit)
  8735  		if result < span.nelems {
  8736  			freeidx := result + 1
  8737  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8738  				span.allocCache >>= uint(theBit + 1)
  8739  				span.freeindex = freeidx
  8740  				span.allocCount++
  8741  				nextFreeFastResult = gclinkptr(uintptr(result)*
  8742  					512 +
  8743  					span.base())
  8744  			}
  8745  		}
  8746  	}
  8747  	v := nextFreeFastResult
  8748  	if v == 0 {
  8749  		v, span, checkGCTrigger = c.nextFree(spc)
  8750  	}
  8751  	x := unsafe.Pointer(v)
  8752  	if needzero && span.needzero != 0 {
  8753  		memclrNoHeapPointers(x, elemsize)
  8754  	}
  8755  
  8756  	publicationBarrier()
  8757  
  8758  	if writeBarrier.enabled {
  8759  		// Allocate black during GC so the current cycle treats the new object as live.
  8760  		gcmarknewobject(span, uintptr(x))
  8761  	} else {
  8762  		// Advance freeIndexForScan only now that x is initialized; the conservative scanner treats slots below it as allocated.
  8763  		span.freeIndexForScan = span.freeindex
  8764  	}
  8765  
  8766  	c.nextSample -= int64(elemsize)
  8767  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8768  		profilealloc(mp, x, elemsize)
  8769  	}
  8770  	mp.mallocing = 0
  8771  	releasem(mp)
  8772  
  8773  	if checkGCTrigger {
  8774  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8775  			gcStart(t)
  8776  		}
  8777  	}
  8778  	if valgrindenabled {
  8779  		valgrindMalloc(x, size)
  8780  	}
  8781  
  8782  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8783  		if assistG := getg().m.curg; assistG != nil {
  8784  			assistG.gcAssistBytes -= int64(elemsize - size)
  8785  		}
  8786  	}
  8787  
  8788  	if debug.malloc {
  8789  		postMallocgcDebug(x, elemsize, typ)
  8790  	}
  8791  	return x
  8792  }
  8793  