Source file src/runtime/map_noswiss.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators continue iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
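
// As an illustrative sketch (not part of the runtime), a lookup conceptually
// reduces to the following, where bucketMask and tophash are the helpers
// defined later in this file and h/t stand for the map header and map type:
//
//	hash := t.Hasher(key, uintptr(h.hash0))
//	bucket := hash & bucketMask(h.B) // low-order bits select the bucket
//	top := tophash(hash)             // high-order byte distinguishes entries
//	// scan b.tophash for top, following the overflow chain as needed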

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/elem pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/maps"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

type maptype = abi.OldMapType

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.OldMapBucketCountBits

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full).
	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
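
// With the standard bucket size (abi.OldMapBucketCount == 8), the constants
// above work out to loadFactorNum == 13 and loadFactorDen == 2, so growth
// triggers once count > 13 * (2^B / 2), i.e. an average load above 6.5
// entries per bucket. A quick check of the arithmetic (a sketch, not part
// of the runtime):
//
//	loadFactorNum = 2 * 8 * 13 / 16 // == 13
//	// overLoadFactor(count, B) is then roughly: count > 6.5 * 2^B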

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
	clearSeq   uint64

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.OldMapBucketCount]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
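
// For a concrete picture (illustrative only, assuming a 64-bit platform and
// map[int64]int64), the full bucket the compiler generates lays out as:
//
//	tophash  [8]uint8 //  8 bytes
//	keys     [8]int64 // 64 bytes
//	elems    [8]int64 // 64 bytes
//	overflow *bmap    //  8 bytes, appended after keys and elems
//
// Grouping all keys together and all elems together is what lets a type like
// map[int64]int8 avoid 7 bytes of alignment padding per key/elem pair.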

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
	clearSeq    uint64
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
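
// Worked example (a sketch, not runtime code): on a 64-bit platform tophash
// keeps the top byte of the hash, bumping values that collide with the
// reserved marks above:
//
//	tophash(0x1700_0000_0000_0000) // top byte 0x17 >= minTopHash, so == 0x17
//	tophash(0x0300_0000_0000_0000) // top byte 0x03 < minTopHash, so == 0x08
//
// The bump keeps 0..4 free for the empty/evacuated states.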

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap_small
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, the bucket pointed to can be used as the first bucket.
//
// makemap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
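
// For example (illustrative arithmetic only): make(map[int]int, 100) needs a
// B for which 100 elements stay within the load factor. overLoadFactor
// reports count > 13*(2^B/2), so the loop above walks:
//
//	B = 3: 6.5 * 8  == 52  < 100 → keep growing
//	B = 4: 6.5 * 16 == 104 ≥ 100 → stop
//
// and makemap picks B == 4, i.e. 16 buckets.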

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil, a new backing array will be allocated;
// otherwise dirtyalloc will be cleared and reused as the backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
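
// For instance, with b == 5 the base array has 32 buckets and bucketShift(1) == 2
// estimated overflow buckets are appended, so nbuckets starts at 34; roundupsize
// may then raise nbuckets further so the allocation fills out its size class.
// The preallocated overflow buckets sit at the tail of the same allocation,
// which is why newoverflow can hand them out by simply bumping a pointer.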

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccessK returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
//
// mapassign should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}

// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapdelete
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need a zeroed hiter since the struct contains pointers.
//
// mapiterinit should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/goccy/go-json
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	}
	it.h = h
	it.clearSeq = h.clearSeq

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

// mapiternext should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiternext
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.OldMapBucketCount; i++ {
		offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start iterating
			// in the middle of a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs).  We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if it.clearSeq == h.clearSeq &&
			((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
				!(t.ReflexiveKey() || t.Key.Equal(k, k))) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
// It is called by the compiler.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting
	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0
	h.clearSeq++

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly allocated.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
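
// For example: with B == 18, B is capped at 15, so same-size growth triggers
// once noverflow reaches 1<<15 == 32768. This matches incrnoverflow above,
// which switches to approximate counting once h.B >= 16 and targets roughly
// one counted overflow bucket per regular bucket.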

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

//go:linkname sameSizeGrowForIssue69110Test
func sameSizeGrowForIssue69110Test(h *hmap) bool {
	return h.sameSizeGrow()
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}
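
// Because every mapassign and mapdelete on a growing map calls growWork, and
// each call evacuates the required old bucket plus at most one more (in
// nevacuate order), a grow finishes after roughly O(noldbuckets) map writes;
// no single operation pays for the whole copy.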

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}
  1372  
  1373  // Reflect stubs. Called from ../reflect/asm_*.s
  1374  
  1375  // reflect_makemap is for package reflect,
  1376  // but widely used packages access it using linkname.
  1377  // Notable members of the hall of shame include:
  1378  //   - gitee.com/quant1x/gox
  1379  //   - github.com/modern-go/reflect2
  1380  //   - github.com/goccy/go-json
  1381  //   - github.com/RomiChan/protobuf
  1382  //   - github.com/segmentio/encoding
  1383  //   - github.com/v2pro/plz
  1384  //
  1385  // Do not remove or change the type signature.
  1386  // See go.dev/issue/67401.
  1387  //
  1388  //go:linkname reflect_makemap reflect.makemap
  1389  func reflect_makemap(t *maptype, cap int) *hmap {
  1390  	// Check invariants and reflects math.
  1391  	if t.Key.Equal == nil {
  1392  		throw("runtime.reflect_makemap: unsupported map key type")
  1393  	}
  1394  	if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
  1395  		t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
  1396  		throw("key size wrong")
  1397  	}
  1398  	if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
  1399  		t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
  1400  		throw("elem size wrong")
  1401  	}
  1402  	if t.Key.Align_ > abi.OldMapBucketCount {
  1403  		throw("key align too big")
  1404  	}
  1405  	if t.Elem.Align_ > abi.OldMapBucketCount {
  1406  		throw("elem align too big")
  1407  	}
  1408  	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
  1409  		throw("key size not a multiple of key align")
  1410  	}
  1411  	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
  1412  		throw("elem size not a multiple of elem align")
  1413  	}
  1414  	if abi.OldMapBucketCount < 8 {
  1415  		throw("bucketsize too small for proper alignment")
  1416  	}
  1417  	if dataOffset%uintptr(t.Key.Align_) != 0 {
  1418  		throw("need padding in bucket (key)")
  1419  	}
  1420  	if dataOffset%uintptr(t.Elem.Align_) != 0 {
  1421  		throw("need padding in bucket (elem)")
  1422  	}
  1423  
  1424  	return makemap(t, cap, nil)
  1425  }
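
        // For reference, the reflect-side call that lands here is ordinary
        // user code; a sketch of the linkname contract:
        //
        //	t := reflect.TypeOf(map[string]int(nil))
        //	m := reflect.MakeMapWithSize(t, 16) // reflect's makemap, i.e. reflect_makemap above
        //	_ = m.Interface().(map[string]int)  // an empty map sized with room for ~16 entries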
  1426  
  1427  // reflect_mapaccess is for package reflect,
  1428  // but widely used packages access it using linkname.
  1429  // Notable members of the hall of shame include:
  1430  //   - gitee.com/quant1x/gox
  1431  //   - github.com/modern-go/reflect2
  1432  //   - github.com/v2pro/plz
  1433  //
  1434  // Do not remove or change the type signature.
  1435  // See go.dev/issue/67401.
  1436  //
  1437  //go:linkname reflect_mapaccess reflect.mapaccess
  1438  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1439  	elem, ok := mapaccess2(t, h, key)
  1440  	if !ok {
  1441  		// reflect wants nil for a missing element
  1442  		elem = nil
  1443  	}
  1444  	return elem
  1445  }
  1446  
  1447  //go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
  1448  func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
  1449  	elem, ok := mapaccess2_faststr(t, h, key)
  1450  	if !ok {
  1451  		// reflect wants nil for a missing element
  1452  		elem = nil
  1453  	}
  1454  	return elem
  1455  }
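
        // The nil-for-missing convention above is what reflect.Value.MapIndex
        // turns into an invalid Value; at the user level (a sketch):
        //
        //	m := reflect.ValueOf(map[string]int{"a": 1})
        //	v := m.MapIndex(reflect.ValueOf("b"))
        //	fmt.Println(v.IsValid()) // false: mapaccess returned nil for the absent key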
  1456  
  1457  // reflect_mapassign is for package reflect,
  1458  // but widely used packages access it using linkname.
  1459  // Notable members of the hall of shame include:
  1460  //   - gitee.com/quant1x/gox
  1461  //   - github.com/v2pro/plz
  1462  //
  1463  // Do not remove or change the type signature.
  1464  //
  1465  //go:linkname reflect_mapassign reflect.mapassign0
  1466  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
  1467  	p := mapassign(t, h, key)
  1468  	typedmemmove(t.Elem, p, elem)
  1469  }
  1470  
  1471  //go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
  1472  func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
  1473  	p := mapassign_faststr(t, h, key)
  1474  	typedmemmove(t.Elem, p, elem)
  1475  }
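
        // The corresponding user-level write path (a sketch; SetMapIndex is
        // the reflect API that reaches mapassign and typedmemmove as above):
        //
        //	m := map[string]int{}
        //	reflect.ValueOf(m).SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))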
  1476  
  1477  //go:linkname reflect_mapdelete reflect.mapdelete
  1478  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1479  	mapdelete(t, h, key)
  1480  }
  1481  
  1482  //go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
  1483  func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
  1484  	mapdelete_faststr(t, h, key)
  1485  }
  1486  
  1487  // reflect_mapiterinit is for package reflect,
  1488  // but widely used packages access it using linkname.
  1489  // Notable members of the hall of shame include:
  1490  //   - github.com/modern-go/reflect2
  1491  //   - gitee.com/quant1x/gox
  1492  //   - github.com/v2pro/plz
  1493  //   - github.com/wI2L/jettison
  1494  //
  1495  // Do not remove or change the type signature.
  1496  // See go.dev/issue/67401.
  1497  //
  1498  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1499  func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
  1500  	mapiterinit(t, h, it)
  1501  }
  1502  
  1503  // reflect_mapiternext is for package reflect,
  1504  // but widely used packages access it using linkname.
  1505  // Notable members of the hall of shame include:
  1506  //   - gitee.com/quant1x/gox
  1507  //   - github.com/modern-go/reflect2
  1508  //   - github.com/goccy/go-json
  1509  //   - github.com/v2pro/plz
  1510  //   - github.com/wI2L/jettison
  1511  //
  1512  // Do not remove or change the type signature.
  1513  // See go.dev/issue/67401.
  1514  //
  1515  //go:linkname reflect_mapiternext reflect.mapiternext
  1516  func reflect_mapiternext(it *hiter) {
  1517  	mapiternext(it)
  1518  }
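
        // Both iterator stubs are driven by reflect.Value.MapRange; a sketch
        // of the user-level loop (mapiterinit on the first Next, mapiternext
        // on later ones):
        //
        //	it := reflect.ValueOf(map[string]int{"a": 1}).MapRange()
        //	for it.Next() {
        //		_, _ = it.Key(), it.Value()
        //	}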
  1519  
  1520  // reflect_mapiterkey was for package reflect,
  1521  // but widely used packages access it using linkname.
  1522  // Notable members of the hall of shame include:
  1523  //   - github.com/goccy/go-json
  1524  //   - gonum.org/v1/gonum
  1525  //
  1526  // Do not remove or change the type signature.
  1527  // See go.dev/issue/67401.
  1528  //
  1529  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1530  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1531  	return it.key
  1532  }
  1533  
  1534  // reflect_mapiterelem was for package reflect,
  1535  // but widely used packages access it using linkname.
  1536  // Notable members of the hall of shame include:
  1537  //   - github.com/goccy/go-json
  1538  //   - gonum.org/v1/gonum
  1539  //
  1540  // Do not remove or change the type signature.
  1541  // See go.dev/issue/67401.
  1542  //
  1543  //go:linkname reflect_mapiterelem reflect.mapiterelem
  1544  func reflect_mapiterelem(it *hiter) unsafe.Pointer {
  1545  	return it.elem
  1546  }
  1547  
  1548  // reflect_maplen is for package reflect,
  1549  // but widely used packages access it using linkname.
  1550  // Notable members of the hall of shame include:
  1551  //   - github.com/goccy/go-json
  1552  //   - github.com/wI2L/jettison
  1553  //
  1554  // Do not remove or change the type signature.
  1555  // See go.dev/issue/67401.
  1556  //
  1557  //go:linkname reflect_maplen reflect.maplen
  1558  func reflect_maplen(h *hmap) int {
  1559  	if h == nil {
  1560  		return 0
  1561  	}
  1562  	if raceenabled {
  1563  		callerpc := sys.GetCallerPC()
  1564  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
  1565  	}
  1566  	return h.count
  1567  }
  1568  
  1569  //go:linkname reflect_mapclear reflect.mapclear
  1570  func reflect_mapclear(t *maptype, h *hmap) {
  1571  	mapclear(t, h)
  1572  }
  1573  
  1574  //go:linkname reflectlite_maplen internal/reflectlite.maplen
  1575  func reflectlite_maplen(h *hmap) int {
  1576  	if h == nil {
  1577  		return 0
  1578  	}
  1579  	if raceenabled {
  1580  		callerpc := sys.GetCallerPC()
  1581  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflectlite_maplen))
  1582  	}
  1583  	return h.count
  1584  }
  1585  
  1586  // mapinitnoop is a no-op function known to the Go linker; if a given global
  1587  // map (of the right size) is determined to be dead, the linker will
  1588  // rewrite the relocation (from the package init func) from the outlined
  1589  // map init function to this symbol. Defined in assembly so as to avoid
  1590  // complications with instrumentation (coverage, etc).
  1591  func mapinitnoop()
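
        // Schematically, for a package-level map the linker proves dead
        // (a sketch of the rewrite described above):
        //
        //	var m = map[string]int{"a": 1} // init would call an outlined map-init function
        //
        // the relocation in the package init is redirected from that outlined
        // function to mapinitnoop, so the dead literal is never constructed.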
  1592  
  1593  // mapclone for implementing maps.Clone
  1594  //
  1595  //go:linkname mapclone maps.clone
  1596  func mapclone(m any) any {
  1597  	e := efaceOf(&m)
  1598  	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
  1599  	return m
  1600  }
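
        // maps.Clone reaches mapclone through the linkname above; at the user
        // level (a sketch):
        //
        //	src := map[string]int{"a": 1, "b": 2}
        //	dst := maps.Clone(src) // mapclone(any(src)); mapclone2 below does the copying
        //	dst["c"] = 3           // dst is independent: src is unchanged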
  1601  
  1602  // moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
  1603  // and the position at which the next key/elem pair will be written; pos == bucketCnt means the next write must go to an overflow bucket.
  1604  func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
  1605  	for i := 0; i < abi.OldMapBucketCount; i++ {
  1606  		if isEmpty(src.tophash[i]) {
  1607  			continue
  1608  		}
  1609  
  1610  		for ; pos < abi.OldMapBucketCount; pos++ {
  1611  			if isEmpty(dst.tophash[pos]) {
  1612  				break
  1613  			}
  1614  		}
  1615  
  1616  		if pos == abi.OldMapBucketCount {
  1617  			dst = h.newoverflow(t, dst)
  1618  			pos = 0
  1619  		}
  1620  
  1621  		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
  1622  		srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
  1623  		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
  1624  		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
  1625  
  1626  		dst.tophash[pos] = src.tophash[i]
  1627  		if t.IndirectKey() {
  1628  			srcK = *(*unsafe.Pointer)(srcK)
  1629  			if t.NeedKeyUpdate() {
  1630  				kStore := newobject(t.Key)
  1631  				typedmemmove(t.Key, kStore, srcK)
  1632  				srcK = kStore
  1633  			}
  1634  			// Note: if NeedKeyUpdate is false, then the memory
  1635  			// used to store the key is immutable, so we can share
  1636  			// it between the original map and its clone.
  1637  			*(*unsafe.Pointer)(dstK) = srcK
  1638  		} else {
  1639  			typedmemmove(t.Key, dstK, srcK)
  1640  		}
  1641  		if t.IndirectElem() {
  1642  			srcEle = *(*unsafe.Pointer)(srcEle)
  1643  			eStore := newobject(t.Elem)
  1644  			typedmemmove(t.Elem, eStore, srcEle)
  1645  			*(*unsafe.Pointer)(dstEle) = eStore
  1646  		} else {
  1647  			typedmemmove(t.Elem, dstEle, srcEle)
  1648  		}
  1649  		pos++
  1650  		h.count++
  1651  	}
  1652  	return dst, pos
  1653  }
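
        // Callers thread the returned (dst, pos) cursor through a whole
        // overflow chain so destination buckets fill densely before a new
        // overflow bucket is chained; the pattern, as used by mapclone2 below:
        //
        //	for srcBmap != nil {
        //		dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
        //		srcBmap = srcBmap.overflow(t)
        //	}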
  1654  
  1655  func mapclone2(t *maptype, src *hmap) *hmap {
  1656  	hint := src.count
  1657  	if overLoadFactor(hint, src.B) {
  1658  		// Note: in rare cases (e.g. during a same-sized grow) the map
  1659  		// can be overloaded. Make sure we don't allocate a destination
  1660  		// bucket array larger than the source bucket array.
  1661  		// This will cause the cloned map to be overloaded also,
  1662  		// but that's better than crashing. See issue 69110.
  1663  		hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
  1664  	}
  1665  	dst := makemap(t, hint, nil)
  1666  	dst.hash0 = src.hash0
  1667  	dst.nevacuate = 0
  1668  	// flags do not need to be copied here: like a freshly made map, the clone starts with none set.
  1669  
  1670  	if src.count == 0 {
  1671  		return dst
  1672  	}
  1673  
  1674  	if src.flags&hashWriting != 0 {
  1675  		fatal("concurrent map clone and map write")
  1676  	}
  1677  
  1678  	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
  1679  		// Quick copy for small maps.
  1680  		dst.buckets = newobject(t.Bucket)
  1681  		dst.count = src.count
  1682  		typedmemmove(t.Bucket, dst.buckets, src.buckets)
  1683  		return dst
  1684  	}
  1685  
  1686  	if dst.B == 0 {
  1687  		dst.buckets = newobject(t.Bucket)
  1688  	}
  1689  	dstArraySize := int(bucketShift(dst.B))
  1690  	srcArraySize := int(bucketShift(src.B))
  1691  	for i := 0; i < dstArraySize; i++ {
  1692  		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
  1693  		pos := 0
  1694  		for j := 0; j < srcArraySize; j += dstArraySize {
  1695  			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
  1696  			for srcBmap != nil {
  1697  				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
  1698  				srcBmap = srcBmap.overflow(t)
  1699  			}
  1700  		}
  1701  	}
  1702  
  1703  	if src.oldbuckets == nil {
  1704  		return dst
  1705  	}
  1706  
  1707  	oldB := src.B
  1708  	srcOldbuckets := src.oldbuckets
  1709  	if !src.sameSizeGrow() {
  1710  		oldB--
  1711  	}
  1712  	oldSrcArraySize := int(bucketShift(oldB))
  1713  
  1714  	for i := 0; i < oldSrcArraySize; i++ {
  1715  		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
  1716  		if evacuated(srcBmap) {
  1717  			continue
  1718  		}
  1719  
  1720  		if oldB >= dst.B { // dst has no more bucket bits than src's old array; each old bucket maps to a single dst bucket
  1721  			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
  1722  			for dstBmap.overflow(t) != nil {
  1723  				dstBmap = dstBmap.overflow(t)
  1724  			}
  1725  			pos := 0
  1726  			for srcBmap != nil {
  1727  				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
  1728  				srcBmap = srcBmap.overflow(t)
  1729  			}
  1730  			continue
  1731  		}
  1732  
  1733  		// oldB < dst.B, so a single source bucket may go to multiple destination buckets.
  1734  		// Process entries one at a time.
  1735  		for srcBmap != nil {
  1736  			// move from old bucket to new bucket
  1737  			for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
  1738  				if isEmpty(srcBmap.tophash[i]) {
  1739  					continue
  1740  				}
  1741  
  1742  				if src.flags&hashWriting != 0 {
  1743  					fatal("concurrent map clone and map write")
  1744  				}
  1745  
  1746  				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
  1747  				if t.IndirectKey() {
  1748  					srcK = *((*unsafe.Pointer)(srcK))
  1749  				}
  1750  
  1751  				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
  1752  				if t.IndirectElem() {
  1753  					srcEle = *((*unsafe.Pointer)(srcEle))
  1754  				}
  1755  				dstEle := mapassign(t, dst, srcK)
  1756  				typedmemmove(t.Elem, dstEle, srcEle)
  1757  			}
  1758  			srcBmap = srcBmap.overflow(t)
  1759  		}
  1760  	}
  1761  	return dst
  1762  }
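
        // A worked instance of the clamp at the top of mapclone2, assuming
        // loadFactorNum/loadFactorDen = 13/2 (the 6.5 maximum average load
        // discussed at the top of this file): if src.B == 5 there are
        // bucketShift(5) == 32 buckets, so
        //
        //	hint = 13 * (32 / 2) = 208
        //
        // which is the largest count overLoadFactor permits for B == 5, so
        // makemap gives dst the same B and the clone's bucket array is never
        // larger than src's.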
  1763  
  1764  // keys for implementing maps.keys
  1765  //
  1766  //go:linkname keys maps.keys
  1767  func keys(m any, p unsafe.Pointer) {
  1768  	e := efaceOf(&m)
  1769  	t := (*maptype)(unsafe.Pointer(e._type))
  1770  	h := (*hmap)(e.data)
  1771  
  1772  	if h == nil || h.count == 0 {
  1773  		return
  1774  	}
  1775  	s := (*slice)(p)
  1776  	r := int(rand())
  1777  	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
  1778  	if h.B == 0 {
  1779  		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
  1780  		return
  1781  	}
  1782  	arraySize := int(bucketShift(h.B))
  1783  	buckets := h.buckets
  1784  	for i := 0; i < arraySize; i++ {
  1785  		bucket := (i + r) & (arraySize - 1)
  1786  		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1787  		copyKeys(t, h, b, s, offset)
  1788  	}
  1789  
  1790  	if h.growing() {
  1791  		oldArraySize := int(h.noldbuckets())
  1792  		for i := 0; i < oldArraySize; i++ {
  1793  			bucket := (i + r) & (oldArraySize - 1)
  1794  			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1795  			if evacuated(b) {
  1796  				continue
  1797  			}
  1798  			copyKeys(t, h, b, s, offset)
  1799  		}
  1800  	}
  1801  	return
  1802  }
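
        // The caller in package maps is expected to pass a pointer to a slice
        // whose capacity already covers every key; a sketch of that contract
        // (hypothetical caller, not the real maps-package code):
        //
        //	s := make([]string, 0, len(m)) // for a map[string]V
        //	keys(m, unsafe.Pointer(&s))    // copyKeys appends in place via the slice header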
  1803  
  1804  func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
  1805  	for b != nil {
  1806  		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
  1807  			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
  1808  			if isEmpty(b.tophash[offi]) {
  1809  				continue
  1810  			}
  1811  			if h.flags&hashWriting != 0 {
  1812  				fatal("concurrent map read and map write")
  1813  			}
  1814  			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
  1815  			if t.IndirectKey() {
  1816  				k = *((*unsafe.Pointer)(k))
  1817  			}
  1818  			if s.len >= s.cap {
  1819  				fatal("concurrent map read and map write")
  1820  			}
  1821  			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
  1822  			s.len++
  1823  		}
  1824  		b = b.overflow(t)
  1825  	}
  1826  }
  1827  
  1828  // values for implementing maps.values
  1829  //
  1830  //go:linkname values maps.values
  1831  func values(m any, p unsafe.Pointer) {
  1832  	e := efaceOf(&m)
  1833  	t := (*maptype)(unsafe.Pointer(e._type))
  1834  	h := (*hmap)(e.data)
  1835  	if h == nil || h.count == 0 {
  1836  		return
  1837  	}
  1838  	s := (*slice)(p)
  1839  	r := int(rand())
  1840  	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
  1841  	if h.B == 0 {
  1842  		copyValues(t, h, (*bmap)(h.buckets), s, offset)
  1843  		return
  1844  	}
  1845  	arraySize := int(bucketShift(h.B))
  1846  	buckets := h.buckets
  1847  	for i := 0; i < arraySize; i++ {
  1848  		bucket := (i + r) & (arraySize - 1)
  1849  		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1850  		copyValues(t, h, b, s, offset)
  1851  	}
  1852  
  1853  	if h.growing() {
  1854  		oldArraySize := int(h.noldbuckets())
  1855  		for i := 0; i < oldArraySize; i++ {
  1856  			bucket := (i + r) & (oldArraySize - 1)
  1857  			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1858  			if evacuated(b) {
  1859  				continue
  1860  			}
  1861  			copyValues(t, h, b, s, offset)
  1862  		}
  1863  	}
  1864  	return
  1865  }
  1866  
  1867  func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
  1868  	for b != nil {
  1869  		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
  1870  			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
  1871  			if isEmpty(b.tophash[offi]) {
  1872  				continue
  1873  			}
  1874  
  1875  			if h.flags&hashWriting != 0 {
  1876  				fatal("concurrent map read and map write")
  1877  			}
  1878  
  1879  			ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
  1880  			if t.IndirectElem() {
  1881  				ele = *((*unsafe.Pointer)(ele))
  1882  			}
  1883  			if s.len >= s.cap {
  1884  				fatal("concurrent map read and map write")
  1885  			}
  1886  			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
  1887  			s.len++
  1888  		}
  1889  		b = b.overflow(t)
  1890  	}
  1891  }
  1892  