Source file src/runtime/map.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table. The data is arranged
    10  // into an array of buckets. Each bucket contains up to
    11  // 8 key/elem pairs. The low-order bits of the hash are
    12  // used to select a bucket. Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big. Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators remain iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
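
To make the layout concrete, here is a small standalone sketch (not runtime code) of how a hash value is split into a bucket index and a per-entry tophash byte. It assumes 64-bit hashes, uses hash/maphash as a stand-in for the runtime's hasher, and the constant 5 mirrors minTopHash defined below.

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	const B = 5               // log2 of the bucket count, like hmap.B
	mask := uintptr(1)<<B - 1 // low-order bits select the bucket

	seed := maphash.MakeSeed()
	h := maphash.String(seed, "example key") // stand-in for t.Hasher

	bucket := uintptr(h) & mask // which of the 2^B buckets this key goes to
	top := uint8(h >> 56)       // top byte of the hash, stored in bmap.tophash
	if top < 5 {                // values below minTopHash are reserved as markers,
		top += 5 // so bump them into the normal range
	}
	fmt.Printf("bucket=%d tophash=%#02x\n", bucket, top)
}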
    32  
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space. I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and elems)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/elem pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows. Typical tables will be somewhat less loaded.
    55  
    56  import (
    57  	"internal/abi"
    58  	"internal/goarch"
    59  	"internal/runtime/atomic"
    60  	"runtime/internal/math"
    61  	"unsafe"
    62  )
    63  
    64  const (
    65  	// Maximum number of key/elem pairs a bucket can hold.
    66  	bucketCntBits = abi.MapBucketCountBits
    67  
    68  	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
    69  	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
    70  	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
    71  	loadFactorDen = 2
    72  	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
    73  
    74  	// data offset should be the size of the bmap struct, but needs to be
    75  	// aligned correctly. For amd64p32 this means 64-bit alignment
    76  	// even though pointers are 32 bit.
    77  	dataOffset = unsafe.Offsetof(struct {
    78  		b bmap
    79  		v int64
    80  	}{}.v)
    81  
    82  	// Possible tophash values. We reserve a few possibilities for special marks.
    83  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    84  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    85  	// during map writes and thus no one else can observe the map during that time).
    86  	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
    87  	emptyOne       = 1 // this cell is empty
    88  	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
    89  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    90  	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
    91  	minTopHash     = 5 // minimum tophash for a normal filled cell.
    92  
    93  	// flags
    94  	iterator     = 1 // there may be an iterator using buckets
    95  	oldIterator  = 2 // there may be an iterator using oldbuckets
    96  	hashWriting  = 4 // a goroutine is writing to the map
    97  	sameSizeGrow = 8 // the current map growth is to a new map of the same size
    98  
    99  	// sentinel bucket ID for iterator checks
   100  	noCheck = 1<<(8*goarch.PtrSize) - 1
   101  )
   102  
   103  // isEmpty reports whether the given tophash array entry represents an empty bucket entry.
   104  func isEmpty(x uint8) bool {
   105  	return x <= emptyOne
   106  }
   107  
   108  // A header for a Go map.
   109  type hmap struct {
   110  	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
   111  	// Make sure this stays in sync with the compiler's definition.
   112  	count     int // # live cells == size of map.  Must be first (used by len() builtin)
   113  	flags     uint8
   114  	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   115  	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
   116  	hash0     uint32 // hash seed
   117  
   118  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   119  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   120  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   121  
   122  	extra *mapextra // optional fields
   123  }
   124  
   125  // mapextra holds fields that are not present on all maps.
   126  type mapextra struct {
   127  	// If both key and elem do not contain pointers and are inline, then we mark bucket
   128  	// type as containing no pointers. This avoids scanning such maps.
   129  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   130  	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
   131  	// overflow and oldoverflow are only used if key and elem do not contain pointers.
   132  	// overflow contains overflow buckets for hmap.buckets.
   133  	// oldoverflow contains overflow buckets for hmap.oldbuckets.
   134  	// The indirection allows us to store a pointer to the slice in hiter.
   135  	overflow    *[]*bmap
   136  	oldoverflow *[]*bmap
   137  
   138  	// nextOverflow holds a pointer to a free overflow bucket.
   139  	nextOverflow *bmap
   140  }
   141  
   142  // A bucket for a Go map.
   143  type bmap struct {
   144  	// tophash generally contains the top byte of the hash value
   145  	// for each key in this bucket. If tophash[0] < minTopHash,
   146  	// tophash[0] is a bucket evacuation state instead.
   147  	tophash [abi.MapBucketCount]uint8
   148  	// Followed by bucketCnt keys and then bucketCnt elems.
   149  	// NOTE: packing all the keys together and then all the elems together makes the
   150  	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
   151  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   152  	// Followed by an overflow pointer.
   153  }
   154  
   155  // A hash iteration structure.
   156  // If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
   157  // and reflect/value.go to match the layout of this structure.
   158  type hiter struct {
   159  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
   160  	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
   161  	t           *maptype
   162  	h           *hmap
   163  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   164  	bptr        *bmap          // current bucket
   165  	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
   166  	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
   167  	startBucket uintptr        // bucket iteration started at
   168  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   169  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   170  	B           uint8
   171  	i           uint8
   172  	bucket      uintptr
   173  	checkBucket uintptr
   174  }
   175  
   176  // bucketShift returns 1<<b, optimized for code generation.
   177  func bucketShift(b uint8) uintptr {
   178  	// Masking the shift amount allows overflow checks to be elided.
   179  	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
   180  }
   181  
   182  // bucketMask returns 1<<b - 1, optimized for code generation.
   183  func bucketMask(b uint8) uintptr {
   184  	return bucketShift(b) - 1
   185  }
   186  
   187  // tophash calculates the tophash value for hash.
   188  func tophash(hash uintptr) uint8 {
   189  	top := uint8(hash >> (goarch.PtrSize*8 - 8))
   190  	if top < minTopHash {
   191  		top += minTopHash
   192  	}
   193  	return top
   194  }
   195  
   196  func evacuated(b *bmap) bool {
   197  	h := b.tophash[0]
   198  	return h > emptyOne && h < minTopHash
   199  }
   200  
   201  func (b *bmap) overflow(t *maptype) *bmap {
   202  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
   203  }
   204  
   205  func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
   206  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
   207  }
   208  
   209  func (b *bmap) keys() unsafe.Pointer {
   210  	return add(unsafe.Pointer(b), dataOffset)
   211  }
   212  
   213  // incrnoverflow increments h.noverflow.
   214  // noverflow counts the number of overflow buckets.
   215  // This is used to trigger same-size map growth.
   216  // See also tooManyOverflowBuckets.
   217  // To keep hmap small, noverflow is a uint16.
   218  // When there are few buckets, noverflow is an exact count.
   219  // When there are many buckets, noverflow is an approximate count.
   220  func (h *hmap) incrnoverflow() {
   221  	// We trigger same-size map growth if there are
   222  	// as many overflow buckets as buckets.
   223  	// We need to be able to count to 1<<h.B.
   224  	if h.B < 16 {
   225  		h.noverflow++
   226  		return
   227  	}
   228  	// Increment with probability 1/(1<<(h.B-15)).
   229  	// When we reach 1<<15 - 1, we will have approximately
   230  	// as many overflow buckets as buckets.
   231  	mask := uint32(1)<<(h.B-15) - 1
   232  	// Example: if h.B == 18, then mask == 7,
   233  	// and rand() & 7 == 0 with probability 1/8.
   234  	if uint32(rand())&mask == 0 {
   235  		h.noverflow++
   236  	}
   237  }
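
The sampled counting described above can be simulated outside the runtime. A minimal sketch, assuming a fair random source and B = 18 (so increments happen with probability 1/8); the names are illustrative, not the runtime's:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const B = 18
	mask := uint32(1)<<(B-15) - 1 // 7: increment with probability 1/8

	trueCount := 40000 // pretend this many overflow buckets were allocated
	var noverflow uint16
	for i := 0; i < trueCount; i++ {
		if rand.Uint32()&mask == 0 {
			noverflow++
		}
	}
	// noverflow is roughly trueCount/8; for B > 15 the runtime compares it
	// against 1<<15, i.e. roughly one overflow bucket per regular bucket.
	fmt.Println(noverflow, uint32(noverflow)*(mask+1), trueCount)
}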
   238  
   239  func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
   240  	var ovf *bmap
   241  	if h.extra != nil && h.extra.nextOverflow != nil {
   242  		// We have preallocated overflow buckets available.
   243  		// See makeBucketArray for more details.
   244  		ovf = h.extra.nextOverflow
   245  		if ovf.overflow(t) == nil {
   246  			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
   247  			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
   248  		} else {
   249  			// This is the last preallocated overflow bucket.
   250  			// Reset the overflow pointer on this bucket,
   251  			// which was set to a non-nil sentinel value.
   252  			ovf.setoverflow(t, nil)
   253  			h.extra.nextOverflow = nil
   254  		}
   255  	} else {
   256  		ovf = (*bmap)(newobject(t.Bucket))
   257  	}
   258  	h.incrnoverflow()
   259  	if !t.Bucket.Pointers() {
   260  		h.createOverflow()
   261  		*h.extra.overflow = append(*h.extra.overflow, ovf)
   262  	}
   263  	b.setoverflow(t, ovf)
   264  	return ovf
   265  }
   266  
   267  func (h *hmap) createOverflow() {
   268  	if h.extra == nil {
   269  		h.extra = new(mapextra)
   270  	}
   271  	if h.extra.overflow == nil {
   272  		h.extra.overflow = new([]*bmap)
   273  	}
   274  }
   275  
   276  func makemap64(t *maptype, hint int64, h *hmap) *hmap {
   277  	if int64(int(hint)) != hint {
   278  		hint = 0
   279  	}
   280  	return makemap(t, int(hint), h)
   281  }
   282  
   283  // makemap_small implements Go map creation for make(map[k]v) and
   284  // make(map[k]v, hint) when hint is known to be at most bucketCnt
   285  // at compile time and the map needs to be allocated on the heap.
   286  func makemap_small() *hmap {
   287  	h := new(hmap)
   288  	h.hash0 = uint32(rand())
   289  	return h
   290  }
   291  
   292  // makemap implements Go map creation for make(map[k]v, hint).
   293  // If the compiler has determined that the map or the first bucket
   294  // can be created on the stack, h and/or bucket may be non-nil.
   295  // If h != nil, the map can be created directly in h.
   296  // If h.buckets != nil, the bucket pointed to can be used as the first bucket.
   297  func makemap(t *maptype, hint int, h *hmap) *hmap {
   298  	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
   299  	if overflow || mem > maxAlloc {
   300  		hint = 0
   301  	}
   302  
   303  	// initialize Hmap
   304  	if h == nil {
   305  		h = new(hmap)
   306  	}
   307  	h.hash0 = uint32(rand())
   308  
   309  	// Find the size parameter B which will hold the requested # of elements.
   310  	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
   311  	B := uint8(0)
   312  	for overLoadFactor(hint, B) {
   313  		B++
   314  	}
   315  	h.B = B
   316  
   317  	// allocate initial hash table
   318  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
   319  	// If hint is large zeroing this memory could take a while.
   320  	if h.B != 0 {
   321  		var nextOverflow *bmap
   322  		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
   323  		if nextOverflow != nil {
   324  			h.extra = new(mapextra)
   325  			h.extra.nextOverflow = nextOverflow
   326  		}
   327  	}
   328  
   329  	return h
   330  }
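
As a concrete example of the sizing loop above: with 8-entry buckets and the 6.5 average load implied by loadFactorNum/loadFactorDen, a hint of 100 settles on B = 4, i.e. 16 buckets with room for up to 104 entries before growing. A standalone sketch of the same calculation (not runtime code; the constant 8 stands in for abi.MapBucketCount):

package main

import "fmt"

func main() {
	const bucketCnt = 8 // abi.MapBucketCount on current platforms

	// Mirrors overLoadFactor: grow while count exceeds ~6.5 entries per bucket.
	overLoadFactor := func(count int, B uint8) bool {
		return count > bucketCnt && uint64(count) > 13*((uint64(1)<<B)/2)
	}

	hint := 100
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	fmt.Printf("hint=%d -> B=%d (%d buckets)\n", hint, B, 1<<B) // hint=100 -> B=4 (16 buckets)
}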
   331  
   332  // makeBucketArray initializes a backing array for map buckets.
   333  // 1<<b is the minimum number of buckets to allocate.
   334  // dirtyalloc should either be nil or a bucket array previously
   335  // allocated by makeBucketArray with the same t and b parameters.
   336  // If dirtyalloc is nil a new backing array will be allocated;
   337  // otherwise dirtyalloc will be cleared and reused as the backing array.
   338  func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
   339  	base := bucketShift(b)
   340  	nbuckets := base
   341  	// For small b, overflow buckets are unlikely.
   342  	// Avoid the overhead of the calculation.
   343  	if b >= 4 {
   344  		// Add on the estimated number of overflow buckets
   345  		// required to insert the median number of elements
   346  		// used with this value of b.
   347  		nbuckets += bucketShift(b - 4)
   348  		sz := t.Bucket.Size_ * nbuckets
   349  		up := roundupsize(sz, !t.Bucket.Pointers())
   350  		if up != sz {
   351  			nbuckets = up / t.Bucket.Size_
   352  		}
   353  	}
   354  
   355  	if dirtyalloc == nil {
   356  		buckets = newarray(t.Bucket, int(nbuckets))
   357  	} else {
   358  		// dirtyalloc was previously generated by
   359  		// the above newarray(t.Bucket, int(nbuckets))
   360  		// but may not be empty.
   361  		buckets = dirtyalloc
   362  		size := t.Bucket.Size_ * nbuckets
   363  		if t.Bucket.Pointers() {
   364  			memclrHasPointers(buckets, size)
   365  		} else {
   366  			memclrNoHeapPointers(buckets, size)
   367  		}
   368  	}
   369  
   370  	if base != nbuckets {
   371  		// We preallocated some overflow buckets.
   372  		// To keep the overhead of tracking these overflow buckets to a minimum,
   373  		// we use the convention that if a preallocated overflow bucket's overflow
   374  		// pointer is nil, then there are more available by bumping the pointer.
   375  		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
   376  		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
   377  		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
   378  		last.setoverflow(t, (*bmap)(buckets))
   379  	}
   380  	return buckets, nextOverflow
   381  }
   382  
   383  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   384  // it will return a reference to the zero object for the elem type if
   385  // the key is not in the map.
   386  // NOTE: The returned pointer may keep the whole map live, so don't
   387  // hold onto it for very long.
   388  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   389  	if raceenabled && h != nil {
   390  		callerpc := getcallerpc()
   391  		pc := abi.FuncPCABIInternal(mapaccess1)
   392  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   393  		raceReadObjectPC(t.Key, key, callerpc, pc)
   394  	}
   395  	if msanenabled && h != nil {
   396  		msanread(key, t.Key.Size_)
   397  	}
   398  	if asanenabled && h != nil {
   399  		asanread(key, t.Key.Size_)
   400  	}
   401  	if h == nil || h.count == 0 {
   402  		if err := mapKeyError(t, key); err != nil {
   403  			panic(err) // see issue 23734
   404  		}
   405  		return unsafe.Pointer(&zeroVal[0])
   406  	}
   407  	if h.flags&hashWriting != 0 {
   408  		fatal("concurrent map read and map write")
   409  	}
   410  	hash := t.Hasher(key, uintptr(h.hash0))
   411  	m := bucketMask(h.B)
   412  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
   413  	if c := h.oldbuckets; c != nil {
   414  		if !h.sameSizeGrow() {
   415  			// There used to be half as many buckets; mask down one more power of two.
   416  			m >>= 1
   417  		}
   418  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
   419  		if !evacuated(oldb) {
   420  			b = oldb
   421  		}
   422  	}
   423  	top := tophash(hash)
   424  bucketloop:
   425  	for ; b != nil; b = b.overflow(t) {
   426  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
   427  			if b.tophash[i] != top {
   428  				if b.tophash[i] == emptyRest {
   429  					break bucketloop
   430  				}
   431  				continue
   432  			}
   433  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   434  			if t.IndirectKey() {
   435  				k = *((*unsafe.Pointer)(k))
   436  			}
   437  			if t.Key.Equal(key, k) {
   438  				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   439  				if t.IndirectElem() {
   440  					e = *((*unsafe.Pointer)(e))
   441  				}
   442  				return e
   443  			}
   444  		}
   445  	}
   446  	return unsafe.Pointer(&zeroVal[0])
   447  }
   448  
   449  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   450  	if raceenabled && h != nil {
   451  		callerpc := getcallerpc()
   452  		pc := abi.FuncPCABIInternal(mapaccess2)
   453  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   454  		raceReadObjectPC(t.Key, key, callerpc, pc)
   455  	}
   456  	if msanenabled && h != nil {
   457  		msanread(key, t.Key.Size_)
   458  	}
   459  	if asanenabled && h != nil {
   460  		asanread(key, t.Key.Size_)
   461  	}
   462  	if h == nil || h.count == 0 {
   463  		if err := mapKeyError(t, key); err != nil {
   464  			panic(err) // see issue 23734
   465  		}
   466  		return unsafe.Pointer(&zeroVal[0]), false
   467  	}
   468  	if h.flags&hashWriting != 0 {
   469  		fatal("concurrent map read and map write")
   470  	}
   471  	hash := t.Hasher(key, uintptr(h.hash0))
   472  	m := bucketMask(h.B)
   473  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
   474  	if c := h.oldbuckets; c != nil {
   475  		if !h.sameSizeGrow() {
   476  			// There used to be half as many buckets; mask down one more power of two.
   477  			m >>= 1
   478  		}
   479  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
   480  		if !evacuated(oldb) {
   481  			b = oldb
   482  		}
   483  	}
   484  	top := tophash(hash)
   485  bucketloop:
   486  	for ; b != nil; b = b.overflow(t) {
   487  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
   488  			if b.tophash[i] != top {
   489  				if b.tophash[i] == emptyRest {
   490  					break bucketloop
   491  				}
   492  				continue
   493  			}
   494  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   495  			if t.IndirectKey() {
   496  				k = *((*unsafe.Pointer)(k))
   497  			}
   498  			if t.Key.Equal(key, k) {
   499  				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   500  				if t.IndirectElem() {
   501  					e = *((*unsafe.Pointer)(e))
   502  				}
   503  				return e, true
   504  			}
   505  		}
   506  	}
   507  	return unsafe.Pointer(&zeroVal[0]), false
   508  }
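
At the language level these two routines back the one-result and two-result forms of a map read: with the gc compiler, `v := m[k]` is typically lowered to a mapaccess1-style call and `v, ok := m[k]` to a mapaccess2-style call (or their type-specialized fast variants). A user-level example of the zero-value behavior documented above, including a read from a nil map:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}

	v := m["missing"]     // one-result form: zero value, backed by mapaccess1*
	w, ok := m["missing"] // two-result form: zero value plus ok, backed by mapaccess2*
	fmt.Println(v, w, ok) // 0 0 false

	var nilMap map[string]int
	fmt.Println(nilMap["x"]) // reading a nil map is fine: 0 (the h == nil case above)
}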
   509  
   510  // mapaccessK returns both key and elem. Used by map iterator.
   511  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   512  	if h == nil || h.count == 0 {
   513  		return nil, nil
   514  	}
   515  	hash := t.Hasher(key, uintptr(h.hash0))
   516  	m := bucketMask(h.B)
   517  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
   518  	if c := h.oldbuckets; c != nil {
   519  		if !h.sameSizeGrow() {
   520  			// There used to be half as many buckets; mask down one more power of two.
   521  			m >>= 1
   522  		}
   523  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
   524  		if !evacuated(oldb) {
   525  			b = oldb
   526  		}
   527  	}
   528  	top := tophash(hash)
   529  bucketloop:
   530  	for ; b != nil; b = b.overflow(t) {
   531  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
   532  			if b.tophash[i] != top {
   533  				if b.tophash[i] == emptyRest {
   534  					break bucketloop
   535  				}
   536  				continue
   537  			}
   538  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   539  			if t.IndirectKey() {
   540  				k = *((*unsafe.Pointer)(k))
   541  			}
   542  			if t.Key.Equal(key, k) {
   543  				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   544  				if t.IndirectElem() {
   545  					e = *((*unsafe.Pointer)(e))
   546  				}
   547  				return k, e
   548  			}
   549  		}
   550  	}
   551  	return nil, nil
   552  }
   553  
   554  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
   555  	e := mapaccess1(t, h, key)
   556  	if e == unsafe.Pointer(&zeroVal[0]) {
   557  		return zero
   558  	}
   559  	return e
   560  }
   561  
   562  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
   563  	e := mapaccess1(t, h, key)
   564  	if e == unsafe.Pointer(&zeroVal[0]) {
   565  		return zero, false
   566  	}
   567  	return e, true
   568  }
   569  
   570  // Like mapaccess, but allocates a slot for the key if it is not present in the map.
   571  func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   572  	if h == nil {
   573  		panic(plainError("assignment to entry in nil map"))
   574  	}
   575  	if raceenabled {
   576  		callerpc := getcallerpc()
   577  		pc := abi.FuncPCABIInternal(mapassign)
   578  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   579  		raceReadObjectPC(t.Key, key, callerpc, pc)
   580  	}
   581  	if msanenabled {
   582  		msanread(key, t.Key.Size_)
   583  	}
   584  	if asanenabled {
   585  		asanread(key, t.Key.Size_)
   586  	}
   587  	if h.flags&hashWriting != 0 {
   588  		fatal("concurrent map writes")
   589  	}
   590  	hash := t.Hasher(key, uintptr(h.hash0))
   591  
   592  	// Set hashWriting after calling t.hasher, since t.hasher may panic,
   593  	// in which case we have not actually done a write.
   594  	h.flags ^= hashWriting
   595  
   596  	if h.buckets == nil {
   597  		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
   598  	}
   599  
   600  again:
   601  	bucket := hash & bucketMask(h.B)
   602  	if h.growing() {
   603  		growWork(t, h, bucket)
   604  	}
   605  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
   606  	top := tophash(hash)
   607  
   608  	var inserti *uint8
   609  	var insertk unsafe.Pointer
   610  	var elem unsafe.Pointer
   611  bucketloop:
   612  	for {
   613  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
   614  			if b.tophash[i] != top {
   615  				if isEmpty(b.tophash[i]) && inserti == nil {
   616  					inserti = &b.tophash[i]
   617  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   618  					elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   619  				}
   620  				if b.tophash[i] == emptyRest {
   621  					break bucketloop
   622  				}
   623  				continue
   624  			}
   625  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   626  			if t.IndirectKey() {
   627  				k = *((*unsafe.Pointer)(k))
   628  			}
   629  			if !t.Key.Equal(key, k) {
   630  				continue
   631  			}
   632  			// already have a mapping for key. Update it.
   633  			if t.NeedKeyUpdate() {
   634  				typedmemmove(t.Key, k, key)
   635  			}
   636  			elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   637  			goto done
   638  		}
   639  		ovf := b.overflow(t)
   640  		if ovf == nil {
   641  			break
   642  		}
   643  		b = ovf
   644  	}
   645  
   646  	// Did not find mapping for key. Allocate new cell & add entry.
   647  
   648  	// If we hit the max load factor or we have too many overflow buckets,
   649  	// and we're not already in the middle of growing, start growing.
   650  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   651  		hashGrow(t, h)
   652  		goto again // Growing the table invalidates everything, so try again
   653  	}
   654  
   655  	if inserti == nil {
   656  		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
   657  		newb := h.newoverflow(t, b)
   658  		inserti = &newb.tophash[0]
   659  		insertk = add(unsafe.Pointer(newb), dataOffset)
   660  		elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
   661  	}
   662  
   663  	// store new key/elem at insert position
   664  	if t.IndirectKey() {
   665  		kmem := newobject(t.Key)
   666  		*(*unsafe.Pointer)(insertk) = kmem
   667  		insertk = kmem
   668  	}
   669  	if t.IndirectElem() {
   670  		vmem := newobject(t.Elem)
   671  		*(*unsafe.Pointer)(elem) = vmem
   672  	}
   673  	typedmemmove(t.Key, insertk, key)
   674  	*inserti = top
   675  	h.count++
   676  
   677  done:
   678  	if h.flags&hashWriting == 0 {
   679  		fatal("concurrent map writes")
   680  	}
   681  	h.flags &^= hashWriting
   682  	if t.IndirectElem() {
   683  		elem = *((*unsafe.Pointer)(elem))
   684  	}
   685  	return elem
   686  }
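
For reference, the user-visible effect of the checks at the top of mapassign: assigning through a nil map panics with the message above, while reads do not. mapassign itself only returns the element slot; the caller (compiler-generated code) performs the store. A small example:

package main

import "fmt"

func main() {
	var m map[string]int // nil map

	fmt.Println(m["a"]) // reads are fine: 0

	defer func() {
		// Recovers the runtime error raised by mapassign's nil check.
		fmt.Println("recovered:", recover()) // assignment to entry in nil map
	}()
	m["a"] = 1 // the write path calls mapassign, which panics on a nil map
}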
   687  
   688  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   689  	if raceenabled && h != nil {
   690  		callerpc := getcallerpc()
   691  		pc := abi.FuncPCABIInternal(mapdelete)
   692  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   693  		raceReadObjectPC(t.Key, key, callerpc, pc)
   694  	}
   695  	if msanenabled && h != nil {
   696  		msanread(key, t.Key.Size_)
   697  	}
   698  	if asanenabled && h != nil {
   699  		asanread(key, t.Key.Size_)
   700  	}
   701  	if h == nil || h.count == 0 {
   702  		if err := mapKeyError(t, key); err != nil {
   703  			panic(err) // see issue 23734
   704  		}
   705  		return
   706  	}
   707  	if h.flags&hashWriting != 0 {
   708  		fatal("concurrent map writes")
   709  	}
   710  
   711  	hash := t.Hasher(key, uintptr(h.hash0))
   712  
   713  	// Set hashWriting after calling t.hasher, since t.hasher may panic,
   714  	// in which case we have not actually done a write (delete).
   715  	h.flags ^= hashWriting
   716  
   717  	bucket := hash & bucketMask(h.B)
   718  	if h.growing() {
   719  		growWork(t, h, bucket)
   720  	}
   721  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
   722  	bOrig := b
   723  	top := tophash(hash)
   724  search:
   725  	for ; b != nil; b = b.overflow(t) {
   726  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
   727  			if b.tophash[i] != top {
   728  				if b.tophash[i] == emptyRest {
   729  					break search
   730  				}
   731  				continue
   732  			}
   733  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
   734  			k2 := k
   735  			if t.IndirectKey() {
   736  				k2 = *((*unsafe.Pointer)(k2))
   737  			}
   738  			if !t.Key.Equal(key, k2) {
   739  				continue
   740  			}
   741  			// Only clear key if there are pointers in it.
   742  			if t.IndirectKey() {
   743  				*(*unsafe.Pointer)(k) = nil
   744  			} else if t.Key.Pointers() {
   745  				memclrHasPointers(k, t.Key.Size_)
   746  			}
   747  			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
   748  			if t.IndirectElem() {
   749  				*(*unsafe.Pointer)(e) = nil
   750  			} else if t.Elem.Pointers() {
   751  				memclrHasPointers(e, t.Elem.Size_)
   752  			} else {
   753  				memclrNoHeapPointers(e, t.Elem.Size_)
   754  			}
   755  			b.tophash[i] = emptyOne
   756  			// If the bucket now ends in a bunch of emptyOne states,
   757  			// change those to emptyRest states.
   758  			// It would be nice to make this a separate function, but
   759  			// for loops are not currently inlineable.
   760  			if i == abi.MapBucketCount-1 {
   761  				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
   762  					goto notLast
   763  				}
   764  			} else {
   765  				if b.tophash[i+1] != emptyRest {
   766  					goto notLast
   767  				}
   768  			}
   769  			for {
   770  				b.tophash[i] = emptyRest
   771  				if i == 0 {
   772  					if b == bOrig {
   773  						break // beginning of initial bucket, we're done.
   774  					}
   775  					// Find previous bucket, continue at its last entry.
   776  					c := b
   777  					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
   778  					}
   779  					i = abi.MapBucketCount - 1
   780  				} else {
   781  					i--
   782  				}
   783  				if b.tophash[i] != emptyOne {
   784  					break
   785  				}
   786  			}
   787  		notLast:
   788  			h.count--
   789  			// Reset the hash seed to make it more difficult for attackers to
   790  			// repeatedly trigger hash collisions. See issue 25237.
   791  			if h.count == 0 {
   792  				h.hash0 = uint32(rand())
   793  			}
   794  			break search
   795  		}
   796  	}
   797  
   798  	if h.flags&hashWriting == 0 {
   799  		fatal("concurrent map writes")
   800  	}
   801  	h.flags &^= hashWriting
   802  }
   803  
   804  // mapiterinit initializes the hiter struct used for ranging over maps.
   805  // The hiter struct pointed to by 'it' is allocated on the stack
   806  // by the compiler's order pass or on the heap by reflect_mapiterinit.
   807  // Both need to have a zeroed hiter since the struct contains pointers.
   808  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   809  	if raceenabled && h != nil {
   810  		callerpc := getcallerpc()
   811  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
   812  	}
   813  
   814  	it.t = t
   815  	if h == nil || h.count == 0 {
   816  		return
   817  	}
   818  
   819  	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
   820  		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
   821  	}
   822  	it.h = h
   823  
   824  	// grab snapshot of bucket state
   825  	it.B = h.B
   826  	it.buckets = h.buckets
   827  	if !t.Bucket.Pointers() {
   828  		// Allocate the current slice and remember pointers to both current and old.
   829  		// This keeps all relevant overflow buckets alive even if
   830  		// the table grows and/or overflow buckets are added to the table
   831  		// while we are iterating.
   832  		h.createOverflow()
   833  		it.overflow = h.extra.overflow
   834  		it.oldoverflow = h.extra.oldoverflow
   835  	}
   836  
   837  	// decide where to start
   838  	r := uintptr(rand())
   839  	it.startBucket = r & bucketMask(h.B)
   840  	it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
   841  
   842  	// iterator state
   843  	it.bucket = it.startBucket
   844  
   845  	// Remember we have an iterator.
   846  	// Can run concurrently with another mapiterinit().
   847  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   848  		atomic.Or8(&h.flags, iterator|oldIterator)
   849  	}
   850  
   851  	mapiternext(it)
   852  }
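
Because startBucket and offset are derived from rand() above, iteration order is deliberately randomized; programs must not rely on it. A small demonstration (output differs between runs and often between the two loops):

package main

import "fmt"

func main() {
	m := map[int]string{1: "a", 2: "b", 3: "c", 4: "d", 5: "e"}

	// Each range starts at a random bucket and a random offset within it
	// (it.startBucket and it.offset above), so these two loops will often
	// print the keys in different orders.
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
}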
   853  
   854  func mapiternext(it *hiter) {
   855  	h := it.h
   856  	if raceenabled {
   857  		callerpc := getcallerpc()
   858  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
   859  	}
   860  	if h.flags&hashWriting != 0 {
   861  		fatal("concurrent map iteration and map write")
   862  	}
   863  	t := it.t
   864  	bucket := it.bucket
   865  	b := it.bptr
   866  	i := it.i
   867  	checkBucket := it.checkBucket
   868  
   869  next:
   870  	if b == nil {
   871  		if bucket == it.startBucket && it.wrapped {
   872  			// end of iteration
   873  			it.key = nil
   874  			it.elem = nil
   875  			return
   876  		}
   877  		if h.growing() && it.B == h.B {
   878  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   879  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   880  			// bucket hasn't been evacuated) then we need to iterate through the old
   881  			// bucket and only return the ones that will be migrated to this bucket.
   882  			oldbucket := bucket & it.h.oldbucketmask()
   883  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
   884  			if !evacuated(b) {
   885  				checkBucket = bucket
   886  			} else {
   887  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
   888  				checkBucket = noCheck
   889  			}
   890  		} else {
   891  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
   892  			checkBucket = noCheck
   893  		}
   894  		bucket++
   895  		if bucket == bucketShift(it.B) {
   896  			bucket = 0
   897  			it.wrapped = true
   898  		}
   899  		i = 0
   900  	}
   901  	for ; i < abi.MapBucketCount; i++ {
   902  		offi := (i + it.offset) & (abi.MapBucketCount - 1)
   903  		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
   904  			// TODO: emptyRest is hard to use here, as we start iterating
   905  			// in the middle of a bucket. It's feasible, just tricky.
   906  			continue
   907  		}
   908  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
   909  		if t.IndirectKey() {
   910  			k = *((*unsafe.Pointer)(k))
   911  		}
   912  		e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
   913  		if checkBucket != noCheck && !h.sameSizeGrow() {
   914  			// Special case: iterator was started during a grow to a larger size
   915  			// and the grow is not done yet. We're working on a bucket whose
   916  			// oldbucket has not been evacuated yet. Or at least, it wasn't
   917  			// evacuated when we started the bucket. So we're iterating
   918  			// through the oldbucket, skipping any keys that will go
   919  			// to the other new bucket (each oldbucket expands to two
   920  			// buckets during a grow).
   921  			if t.ReflexiveKey() || t.Key.Equal(k, k) {
   922  				// If the item in the oldbucket is not destined for
   923  				// the current new bucket in the iteration, skip it.
   924  				hash := t.Hasher(k, uintptr(h.hash0))
   925  				if hash&bucketMask(it.B) != checkBucket {
   926  					continue
   927  				}
   928  			} else {
   929  				// Hash isn't repeatable if k != k (NaNs).  We need a
   930  				// repeatable and randomish choice of which direction
   931  				// to send NaNs during evacuation. We'll use the low
   932  				// bit of tophash to decide which way NaNs go.
   933  				// NOTE: this case is why we need two evacuate tophash
   934  				// values, evacuatedX and evacuatedY, that differ in
   935  				// their low bit.
   936  				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   937  					continue
   938  				}
   939  			}
   940  		}
   941  		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
   942  			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
   943  			// This is the golden data, we can return it.
   944  			// OR
   945  			// key!=key, so the entry can't be deleted or updated, so we can just return it.
   946  			// That's lucky for us because when key!=key we can't look it up successfully.
   947  			it.key = k
   948  			if t.IndirectElem() {
   949  				e = *((*unsafe.Pointer)(e))
   950  			}
   951  			it.elem = e
   952  		} else {
   953  			// The hash table has grown since the iterator was started.
   954  			// The golden data for this key is now somewhere else.
   955  			// Check the current hash table for the data.
   956  			// This code handles the case where the key
   957  			// has been deleted, updated, or deleted and reinserted.
   958  			// NOTE: we need to regrab the key as it has potentially been
   959  			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   960  			rk, re := mapaccessK(t, h, k)
   961  			if rk == nil {
   962  				continue // key has been deleted
   963  			}
   964  			it.key = rk
   965  			it.elem = re
   966  		}
   967  		it.bucket = bucket
   968  		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
   969  			it.bptr = b
   970  		}
   971  		it.i = i + 1
   972  		it.checkBucket = checkBucket
   973  		return
   974  	}
   975  	b = b.overflow(t)
   976  	i = 0
   977  	goto next
   978  }
   979  
   980  // mapclear deletes all keys from a map.
   981  func mapclear(t *maptype, h *hmap) {
   982  	if raceenabled && h != nil {
   983  		callerpc := getcallerpc()
   984  		pc := abi.FuncPCABIInternal(mapclear)
   985  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   986  	}
   987  
   988  	if h == nil || h.count == 0 {
   989  		return
   990  	}
   991  
   992  	if h.flags&hashWriting != 0 {
   993  		fatal("concurrent map writes")
   994  	}
   995  
   996  	h.flags ^= hashWriting
   997  
   998  	// Mark buckets empty, so existing iterators can be terminated, see issue #59411.
   999  	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
  1000  		for i := uintptr(0); i <= mask; i++ {
  1001  			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
  1002  			for ; b != nil; b = b.overflow(t) {
  1003  				for i := uintptr(0); i < abi.MapBucketCount; i++ {
  1004  					b.tophash[i] = emptyRest
  1005  				}
  1006  			}
  1007  		}
  1008  	}
  1009  	markBucketsEmpty(h.buckets, bucketMask(h.B))
  1010  	if oldBuckets := h.oldbuckets; oldBuckets != nil {
  1011  		markBucketsEmpty(oldBuckets, h.oldbucketmask())
  1012  	}
  1013  
  1014  	h.flags &^= sameSizeGrow
  1015  	h.oldbuckets = nil
  1016  	h.nevacuate = 0
  1017  	h.noverflow = 0
  1018  	h.count = 0
  1019  
  1020  	// Reset the hash seed to make it more difficult for attackers to
  1021  	// repeatedly trigger hash collisions. See issue 25237.
  1022  	h.hash0 = uint32(rand())
  1023  
  1024  	// Keep the mapextra allocation but clear any extra information.
  1025  	if h.extra != nil {
  1026  		*h.extra = mapextra{}
  1027  	}
  1028  
  1029  	// makeBucketArray clears the memory pointed to by h.buckets
  1030  	// and recovers any overflow buckets by generating them
  1031  	// as if h.buckets were newly allocated.
  1032  	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
  1033  	if nextOverflow != nil {
  1034  		// If overflow buckets are created then h.extra
  1035  		// will have been allocated during initial bucket creation.
  1036  		h.extra.nextOverflow = nextOverflow
  1037  	}
  1038  
  1039  	if h.flags&hashWriting == 0 {
  1040  		fatal("concurrent map writes")
  1041  	}
  1042  	h.flags &^= hashWriting
  1043  }
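
mapclear is the runtime entry point for clearing a whole map at once; with the gc compiler it serves the clear builtin and, as an optimization, the idiomatic delete-every-key range loop. A user-level example:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Both of these clear every entry in one pass; with the gc compiler
	// they are handled by runtime.mapclear rather than per-key deletes.
	for k := range m {
		delete(m, k)
	}
	m["c"] = 3
	clear(m) // Go 1.21+ builtin

	fmt.Println(len(m)) // 0
}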
  1044  
  1045  func hashGrow(t *maptype, h *hmap) {
  1046  	// If we've hit the load factor, get bigger.
  1047  	// Otherwise, there are too many overflow buckets,
  1048  	// so keep the same number of buckets and "grow" laterally.
  1049  	bigger := uint8(1)
  1050  	if !overLoadFactor(h.count+1, h.B) {
  1051  		bigger = 0
  1052  		h.flags |= sameSizeGrow
  1053  	}
  1054  	oldbuckets := h.buckets
  1055  	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
  1056  
  1057  	flags := h.flags &^ (iterator | oldIterator)
  1058  	if h.flags&iterator != 0 {
  1059  		flags |= oldIterator
  1060  	}
  1061  	// commit the grow (atomic wrt gc)
  1062  	h.B += bigger
  1063  	h.flags = flags
  1064  	h.oldbuckets = oldbuckets
  1065  	h.buckets = newbuckets
  1066  	h.nevacuate = 0
  1067  	h.noverflow = 0
  1068  
  1069  	if h.extra != nil && h.extra.overflow != nil {
  1070  		// Promote current overflow buckets to the old generation.
  1071  		if h.extra.oldoverflow != nil {
  1072  			throw("oldoverflow is not nil")
  1073  		}
  1074  		h.extra.oldoverflow = h.extra.overflow
  1075  		h.extra.overflow = nil
  1076  	}
  1077  	if nextOverflow != nil {
  1078  		if h.extra == nil {
  1079  			h.extra = new(mapextra)
  1080  		}
  1081  		h.extra.nextOverflow = nextOverflow
  1082  	}
  1083  
  1084  	// the actual copying of the hash table data is done incrementally
  1085  	// by growWork() and evacuate().
  1086  }
  1087  
  1088  // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
  1089  func overLoadFactor(count int, B uint8) bool {
  1090  	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
  1091  }
  1092  
  1093  // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
  1094  // Note that most of these overflow buckets must be in sparse use;
  1095  // if use was dense, then we'd have already triggered regular map growth.
  1096  func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
  1097  	// If the threshold is too low, we do extraneous work.
  1098  	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
  1099  	// "too many" means (approximately) as many overflow buckets as regular buckets.
  1100  	// See incrnoverflow for more details.
  1101  	if B > 15 {
  1102  		B = 15
  1103  	}
  1104  	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
  1105  	return noverflow >= uint16(1)<<(B&15)
  1106  }
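
A quick numeric check of this threshold, as a standalone sketch (illustrative helper, not the runtime function): at B = 5 same-size growth triggers once there are 32 overflow buckets, and for B > 15 the cap together with the sampled noverflow counter makes 1<<15 the effective limit.

package main

import "fmt"

func main() {
	// Mirrors tooManyOverflowBuckets with illustrative values.
	tooMany := func(noverflow uint16, B uint8) bool {
		if B > 15 {
			B = 15
		}
		return noverflow >= uint16(1)<<(B&15)
	}

	fmt.Println(tooMany(31, 5), tooMany(32, 5)) // false true: threshold at B=5 is 1<<5 = 32
	fmt.Println(tooMany(1<<15-1, 20))           // false
	fmt.Println(tooMany(1<<15, 20))             // true: for B > 15 the threshold is capped at 1<<15
}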
  1107  
  1108  // growing reports whether h is growing. The growth may be to the same size or bigger.
  1109  func (h *hmap) growing() bool {
  1110  	return h.oldbuckets != nil
  1111  }
  1112  
  1113  // sameSizeGrow reports whether the current growth is to a map of the same size.
  1114  func (h *hmap) sameSizeGrow() bool {
  1115  	return h.flags&sameSizeGrow != 0
  1116  }
  1117  
  1118  // noldbuckets calculates the number of buckets prior to the current map growth.
  1119  func (h *hmap) noldbuckets() uintptr {
  1120  	oldB := h.B
  1121  	if !h.sameSizeGrow() {
  1122  		oldB--
  1123  	}
  1124  	return bucketShift(oldB)
  1125  }
  1126  
  1127  // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
  1128  func (h *hmap) oldbucketmask() uintptr {
  1129  	return h.noldbuckets() - 1
  1130  }
  1131  
  1132  func growWork(t *maptype, h *hmap, bucket uintptr) {
  1133  	// make sure we evacuate the oldbucket corresponding
  1134  	// to the bucket we're about to use
  1135  	evacuate(t, h, bucket&h.oldbucketmask())
  1136  
  1137  	// evacuate one more oldbucket to make progress on growing
  1138  	if h.growing() {
  1139  		evacuate(t, h, h.nevacuate)
  1140  	}
  1141  }
  1142  
  1143  func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
  1144  	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
  1145  	return evacuated(b)
  1146  }
  1147  
  1148  // evacDst is an evacuation destination.
  1149  type evacDst struct {
  1150  	b *bmap          // current destination bucket
  1151  	i int            // key/elem index into b
  1152  	k unsafe.Pointer // pointer to current key storage
  1153  	e unsafe.Pointer // pointer to current elem storage
  1154  }
  1155  
  1156  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
  1157  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
  1158  	newbit := h.noldbuckets()
  1159  	if !evacuated(b) {
  1160  		// TODO: reuse overflow buckets instead of using new ones, if there
  1161  		// is no iterator using the old buckets.  (If !oldIterator.)
  1162  
  1163  		// xy contains the x and y (low and high) evacuation destinations.
  1164  		var xy [2]evacDst
  1165  		x := &xy[0]
  1166  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
  1167  		x.k = add(unsafe.Pointer(x.b), dataOffset)
  1168  		x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
  1169  
  1170  		if !h.sameSizeGrow() {
  1171  			// Only calculate y pointers if we're growing bigger.
  1172  			// Otherwise GC can see bad pointers.
  1173  			y := &xy[1]
  1174  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
  1175  			y.k = add(unsafe.Pointer(y.b), dataOffset)
  1176  			y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
  1177  		}
  1178  
  1179  		for ; b != nil; b = b.overflow(t) {
  1180  			k := add(unsafe.Pointer(b), dataOffset)
  1181  			e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
  1182  			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
  1183  				top := b.tophash[i]
  1184  				if isEmpty(top) {
  1185  					b.tophash[i] = evacuatedEmpty
  1186  					continue
  1187  				}
  1188  				if top < minTopHash {
  1189  					throw("bad map state")
  1190  				}
  1191  				k2 := k
  1192  				if t.IndirectKey() {
  1193  					k2 = *((*unsafe.Pointer)(k2))
  1194  				}
  1195  				var useY uint8
  1196  				if !h.sameSizeGrow() {
  1197  					// Compute hash to make our evacuation decision (whether we need
  1198  					// to send this key/elem to bucket x or bucket y).
  1199  					hash := t.Hasher(k2, uintptr(h.hash0))
  1200  					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
  1201  						// If key != key (NaNs), then the hash could be (and probably
  1202  						// will be) entirely different from the old hash. Moreover,
  1203  						// it isn't reproducible. Reproducibility is required in the
  1204  						// presence of iterators, as our evacuation decision must
  1205  						// match whatever decision the iterator made.
  1206  						// Fortunately, we have the freedom to send these keys either
  1207  						// way. Also, tophash is meaningless for these kinds of keys.
  1208  						// We let the low bit of tophash drive the evacuation decision.
  1209  						// We recompute a new random tophash for the next level so
  1210  						// these keys will get evenly distributed across all buckets
  1211  						// after multiple grows.
  1212  						useY = top & 1
  1213  						top = tophash(hash)
  1214  					} else {
  1215  						if hash&newbit != 0 {
  1216  							useY = 1
  1217  						}
  1218  					}
  1219  				}
  1220  
  1221  				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
  1222  					throw("bad evacuatedN")
  1223  				}
  1224  
  1225  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
  1226  				dst := &xy[useY]                 // evacuation destination
  1227  
  1228  				if dst.i == abi.MapBucketCount {
  1229  					dst.b = h.newoverflow(t, dst.b)
  1230  					dst.i = 0
  1231  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
  1232  					dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
  1233  				}
  1234  				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
  1235  				if t.IndirectKey() {
  1236  					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
  1237  				} else {
  1238  					typedmemmove(t.Key, dst.k, k) // copy key
  1239  				}
  1240  				if t.IndirectElem() {
  1241  					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
  1242  				} else {
  1243  					typedmemmove(t.Elem, dst.e, e)
  1244  				}
  1245  				dst.i++
  1246  				// These updates might push these pointers past the end of the
  1247  				// key or elem arrays.  That's ok, as we have the overflow pointer
  1248  				// at the end of the bucket to protect against pointing past the
  1249  				// end of the bucket.
  1250  				dst.k = add(dst.k, uintptr(t.KeySize))
  1251  				dst.e = add(dst.e, uintptr(t.ValueSize))
  1252  			}
  1253  		}
  1254  		// Unlink the overflow buckets & clear key/elem to help GC.
  1255  		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
  1256  			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
  1257  			// Preserve b.tophash because the evacuation
  1258  			// state is maintained there.
  1259  			ptr := add(b, dataOffset)
  1260  			n := uintptr(t.BucketSize) - dataOffset
  1261  			memclrHasPointers(ptr, n)
  1262  		}
  1263  	}
  1264  
  1265  	if oldbucket == h.nevacuate {
  1266  		advanceEvacuationMark(h, t, newbit)
  1267  	}
  1268  }
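
When the table doubles, each old bucket i splits between new buckets i (the X destination) and i+newbit (the Y destination), decided by hash&newbit exactly as above. A standalone sketch of that split, assuming 64-bit hashes and using hash/maphash as a stand-in for the runtime's hasher:

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed()
	oldB := uint8(4)             // growing from 16 buckets...
	newbit := uintptr(1) << oldB // ...to 32; newbit distinguishes X from Y

	for _, key := range []string{"alpha", "beta", "gamma", "delta"} {
		hash := uintptr(maphash.String(seed, key))
		oldbucket := hash & (newbit - 1) // bucket in the old, smaller table
		newbucket := oldbucket           // X destination: same index
		if hash&newbit != 0 {
			newbucket = oldbucket + newbit // Y destination: upper half
		}
		fmt.Printf("%s: old bucket %2d -> new bucket %2d\n", key, oldbucket, newbucket)
	}
}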
  1269  
  1270  func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
  1271  	h.nevacuate++
  1272  	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
  1273  	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
  1274  	stop := h.nevacuate + 1024
  1275  	if stop > newbit {
  1276  		stop = newbit
  1277  	}
  1278  	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
  1279  		h.nevacuate++
  1280  	}
  1281  	if h.nevacuate == newbit { // newbit == # of oldbuckets
  1282  		// Growing is all done. Free old main bucket array.
  1283  		h.oldbuckets = nil
  1284  		// Can discard old overflow buckets as well.
  1285  		// If they are still referenced by an iterator,
  1286  		// then the iterator holds a pointer to the slice.
  1287  		if h.extra != nil {
  1288  			h.extra.oldoverflow = nil
  1289  		}
  1290  		h.flags &^= sameSizeGrow
  1291  	}
  1292  }
  1293  
  1294  // Reflect stubs. Called from ../reflect/asm_*.s
  1295  
  1296  //go:linkname reflect_makemap reflect.makemap
  1297  func reflect_makemap(t *maptype, cap int) *hmap {
  1298  	// Check invariants and reflect's math.
  1299  	if t.Key.Equal == nil {
  1300  		throw("runtime.reflect_makemap: unsupported map key type")
  1301  	}
  1302  	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
  1303  		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
  1304  		throw("key size wrong")
  1305  	}
  1306  	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
  1307  		t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
  1308  		throw("elem size wrong")
  1309  	}
  1310  	if t.Key.Align_ > abi.MapBucketCount {
  1311  		throw("key align too big")
  1312  	}
  1313  	if t.Elem.Align_ > abi.MapBucketCount {
  1314  		throw("elem align too big")
  1315  	}
  1316  	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
  1317  		throw("key size not a multiple of key align")
  1318  	}
  1319  	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
  1320  		throw("elem size not a multiple of elem align")
  1321  	}
  1322  	if abi.MapBucketCount < 8 {
  1323  		throw("bucketsize too small for proper alignment")
  1324  	}
  1325  	if dataOffset%uintptr(t.Key.Align_) != 0 {
  1326  		throw("need padding in bucket (key)")
  1327  	}
  1328  	if dataOffset%uintptr(t.Elem.Align_) != 0 {
  1329  		throw("need padding in bucket (elem)")
  1330  	}
  1331  
  1332  	return makemap(t, cap, nil)
  1333  }
  1334  
  1335  //go:linkname reflect_mapaccess reflect.mapaccess
  1336  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1337  	elem, ok := mapaccess2(t, h, key)
  1338  	if !ok {
  1339  		// reflect wants nil for a missing element
  1340  		elem = nil
  1341  	}
  1342  	return elem
  1343  }
  1344  
  1345  //go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
  1346  func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
  1347  	elem, ok := mapaccess2_faststr(t, h, key)
  1348  	if !ok {
  1349  		// reflect wants nil for a missing element
  1350  		elem = nil
  1351  	}
  1352  	return elem
  1353  }
  1354  
  1355  //go:linkname reflect_mapassign reflect.mapassign0
  1356  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
  1357  	p := mapassign(t, h, key)
  1358  	typedmemmove(t.Elem, p, elem)
  1359  }
  1360  
  1361  //go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
  1362  func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
  1363  	p := mapassign_faststr(t, h, key)
  1364  	typedmemmove(t.Elem, p, elem)
  1365  }
  1366  
  1367  //go:linkname reflect_mapdelete reflect.mapdelete
  1368  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1369  	mapdelete(t, h, key)
  1370  }
  1371  
  1372  //go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
  1373  func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
  1374  	mapdelete_faststr(t, h, key)
  1375  }
  1376  
  1377  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1378  func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
  1379  	mapiterinit(t, h, it)
  1380  }
  1381  
  1382  //go:linkname reflect_mapiternext reflect.mapiternext
  1383  func reflect_mapiternext(it *hiter) {
  1384  	mapiternext(it)
  1385  }
  1386  
  1387  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1388  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1389  	return it.key
  1390  }
  1391  
  1392  //go:linkname reflect_mapiterelem reflect.mapiterelem
  1393  func reflect_mapiterelem(it *hiter) unsafe.Pointer {
  1394  	return it.elem
  1395  }
  1396  
  1397  //go:linkname reflect_maplen reflect.maplen
  1398  func reflect_maplen(h *hmap) int {
  1399  	if h == nil {
  1400  		return 0
  1401  	}
  1402  	if raceenabled {
  1403  		callerpc := getcallerpc()
  1404  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
  1405  	}
  1406  	return h.count
  1407  }
  1408  
  1409  //go:linkname reflect_mapclear reflect.mapclear
  1410  func reflect_mapclear(t *maptype, h *hmap) {
  1411  	mapclear(t, h)
  1412  }
  1413  
  1414  //go:linkname reflectlite_maplen internal/reflectlite.maplen
  1415  func reflectlite_maplen(h *hmap) int {
  1416  	if h == nil {
  1417  		return 0
  1418  	}
  1419  	if raceenabled {
  1420  		callerpc := getcallerpc()
  1421  		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
  1422  	}
  1423  	return h.count
  1424  }
  1425  
  1426  var zeroVal [abi.ZeroValSize]byte
  1427  
  1428  // mapinitnoop is a no-op function known to the Go linker; if a given global
  1429  // map (of the right size) is determined to be dead, the linker will
  1430  // rewrite the relocation (from the package init func) from the outlined
  1431  // map init function to this symbol. Defined in assembly so as to avoid
  1432  // complications with instrumentation (coverage, etc).
  1433  func mapinitnoop()
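
        // The rewrite applies to otherwise-unreferenced package-level maps whose
        // initialization the compiler has outlined. A sketch of the kind of global that
        // qualifies (whether the init is actually outlined and eliminated depends on
        // compiler and linker heuristics):
        //
        //	package p
        //
        //	// Initialized by an outlined map-init function called from p's init.
        //	var lookupTable = map[string]int{
        //		"alpha": 1,
        //		"beta":  2,
        //		// ... enough entries that the compiler outlines the initialization
        //	}
        //
        //	// If nothing in the linked binary references lookupTable, the linker can
        //	// retarget that init call to mapinitnoop and drop the map data.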
  1434  
  1435  // mapclone for implementing maps.Clone
  1436  //
  1437  //go:linkname mapclone maps.clone
  1438  func mapclone(m any) any {
  1439  	e := efaceOf(&m)
  1440  	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
  1441  	return m
  1442  }
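
        // mapclone is reached from user code through maps.Clone. The copy is shallow:
        // keys and elements are copied with typedmemmove, so reference-typed elements in
        // the clone still alias the originals. For example:
        //
        //	src := map[string][]int{"a": {1, 2}}
        //	dst := maps.Clone(src)
        //	dst["b"] = []int{3} // does not affect src
        //	dst["a"][0] = 99    // visible through src["a"] too: the slice is shared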
  1443  
  1444  // moveToBmap moves the entries of bucket src into dst. It returns the destination bucket (a new one if dst
  1445  // overflowed) and the position of the next free slot; pos == abi.MapBucketCount means the next entry needs an overflow bucket.
  1446  func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
  1447  	for i := 0; i < abi.MapBucketCount; i++ {
  1448  		if isEmpty(src.tophash[i]) {
  1449  			continue
  1450  		}
  1451  
  1452  		for ; pos < abi.MapBucketCount; pos++ {
  1453  			if isEmpty(dst.tophash[pos]) {
  1454  				break
  1455  			}
  1456  		}
  1457  
  1458  		if pos == abi.MapBucketCount {
  1459  			dst = h.newoverflow(t, dst)
  1460  			pos = 0
  1461  		}
  1462  
  1463  		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
  1464  		srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
  1465  		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
  1466  		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
  1467  
  1468  		dst.tophash[pos] = src.tophash[i]
  1469  		if t.IndirectKey() {
  1470  			srcK = *(*unsafe.Pointer)(srcK)
  1471  			if t.NeedKeyUpdate() {
  1472  				kStore := newobject(t.Key)
  1473  				typedmemmove(t.Key, kStore, srcK)
  1474  				srcK = kStore
  1475  			}
  1476  			// Note: if NeedKeyUpdate is false, then the memory
  1477  			// used to store the key is immutable, so we can share
  1478  			// it between the original map and its clone.
  1479  			*(*unsafe.Pointer)(dstK) = srcK
  1480  		} else {
  1481  			typedmemmove(t.Key, dstK, srcK)
  1482  		}
  1483  		if t.IndirectElem() {
  1484  			srcEle = *(*unsafe.Pointer)(srcEle)
  1485  			eStore := newobject(t.Elem)
  1486  			typedmemmove(t.Elem, eStore, srcEle)
  1487  			*(*unsafe.Pointer)(dstEle) = eStore
  1488  		} else {
  1489  			typedmemmove(t.Elem, dstEle, srcEle)
  1490  		}
  1491  		pos++
  1492  		h.count++
  1493  	}
  1494  	return dst, pos
  1495  }
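
        // A typical caller threads the returned bucket and position through an entire
        // overflow chain, as mapclone2 does below:
        //
        //	for srcBmap != nil {
        //		dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
        //		srcBmap = srcBmap.overflow(t)
        //	}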
  1496  
  1497  func mapclone2(t *maptype, src *hmap) *hmap {
  1498  	dst := makemap(t, src.count, nil)
  1499  	dst.hash0 = src.hash0
  1500  	dst.nevacuate = 0
  1501  	// flags need not be copied here; like any new map, dst starts with no flags.
  1502  
  1503  	if src.count == 0 {
  1504  		return dst
  1505  	}
  1506  
  1507  	if src.flags&hashWriting != 0 {
  1508  		fatal("concurrent map clone and map write")
  1509  	}
  1510  
  1511  	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
  1512  		// Quick copy for small maps.
  1513  		dst.buckets = newobject(t.Bucket)
  1514  		dst.count = src.count
  1515  		typedmemmove(t.Bucket, dst.buckets, src.buckets)
  1516  		return dst
  1517  	}
  1518  
  1519  	if dst.B == 0 {
  1520  		dst.buckets = newobject(t.Bucket)
  1521  	}
  1522  	dstArraySize := int(bucketShift(dst.B))
  1523  	srcArraySize := int(bucketShift(src.B))
  1524  	for i := 0; i < dstArraySize; i++ {
  1525  		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
  1526  		pos := 0
  1527  		for j := 0; j < srcArraySize; j += dstArraySize {
  1528  			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
  1529  			for srcBmap != nil {
  1530  				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
  1531  				srcBmap = srcBmap.overflow(t)
  1532  			}
  1533  		}
  1534  	}
  1535  
  1536  	if src.oldbuckets == nil {
  1537  		return dst
  1538  	}
  1539  
  1540  	oldB := src.B
  1541  	srcOldbuckets := src.oldbuckets
  1542  	if !src.sameSizeGrow() {
  1543  		oldB--
  1544  	}
  1545  	oldSrcArraySize := int(bucketShift(oldB))
  1546  
  1547  	for i := 0; i < oldSrcArraySize; i++ {
  1548  		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
  1549  		if evacuated(srcBmap) {
  1550  			continue
  1551  		}
  1552  
  1553  		if oldB >= dst.B { // main bucket bits in dst are no more than oldB bits in src
  1554  			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
  1555  			for dstBmap.overflow(t) != nil {
  1556  				dstBmap = dstBmap.overflow(t)
  1557  			}
  1558  			pos := 0
  1559  			for srcBmap != nil {
  1560  				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
  1561  				srcBmap = srcBmap.overflow(t)
  1562  			}
  1563  			continue
  1564  		}
  1565  
  1566  		// oldB < dst.B, so a single source bucket may go to multiple destination buckets.
  1567  		// Process entries one at a time.
  1568  		for srcBmap != nil {
  1569  			// move from old bucket to new bucket
  1570  			for i := uintptr(0); i < abi.MapBucketCount; i++ {
  1571  				if isEmpty(srcBmap.tophash[i]) {
  1572  					continue
  1573  				}
  1574  
  1575  				if src.flags&hashWriting != 0 {
  1576  					fatal("concurrent map clone and map write")
  1577  				}
  1578  
  1579  				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
  1580  				if t.IndirectKey() {
  1581  					srcK = *((*unsafe.Pointer)(srcK))
  1582  				}
  1583  
  1584  				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
  1585  				if t.IndirectElem() {
  1586  					srcEle = *((*unsafe.Pointer)(srcEle))
  1587  				}
  1588  				dstEle := mapassign(t, dst, srcK)
  1589  				typedmemmove(t.Elem, dstEle, srcEle)
  1590  			}
  1591  			srcBmap = srcBmap.overflow(t)
  1592  		}
  1593  	}
  1594  	return dst
  1595  }
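
        // None of the bucket bookkeeping above is observable by the caller: whether the
        // source map is small, fully grown, or caught mid-growth, the clone ends up with
        // the same key/elem pairs and its own storage is independent of the source
        // afterwards. A sketch of that contract:
        //
        //	src := make(map[int]int)
        //	for i := 0; i < 1000; i++ {
        //		src[i] = i * i
        //	}
        //	dst := maps.Clone(src)
        //	delete(src, 37)
        //	fmt.Println(len(dst), dst[37]) // 1000 1369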
  1596  
  1597  // keys for implementing maps.keys
  1598  //
  1599  //go:linkname keys maps.keys
  1600  func keys(m any, p unsafe.Pointer) {
  1601  	e := efaceOf(&m)
  1602  	t := (*maptype)(unsafe.Pointer(e._type))
  1603  	h := (*hmap)(e.data)
  1604  
  1605  	if h == nil || h.count == 0 {
  1606  		return
  1607  	}
  1608  	s := (*slice)(p)
  1609  	r := int(rand())
  1610  	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
  1611  	if h.B == 0 {
  1612  		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
  1613  		return
  1614  	}
  1615  	arraySize := int(bucketShift(h.B))
  1616  	buckets := h.buckets
  1617  	for i := 0; i < arraySize; i++ {
  1618  		bucket := (i + r) & (arraySize - 1)
  1619  		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1620  		copyKeys(t, h, b, s, offset)
  1621  	}
  1622  
  1623  	if h.growing() {
  1624  		oldArraySize := int(h.noldbuckets())
  1625  		for i := 0; i < oldArraySize; i++ {
  1626  			bucket := (i + r) & (oldArraySize - 1)
  1627  			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1628  			if evacuated(b) {
  1629  				continue
  1630  			}
  1631  			copyKeys(t, h, b, s, offset)
  1632  		}
  1633  	}
  1634  	return
  1635  }
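
        // The caller on the maps side is assumed to pass a pointer to a slice whose
        // backing array already has capacity for h.count keys; copyKeys fills it in, and
        // its s.len >= s.cap check doubles as a cheap guard against the map growing
        // concurrently. A sketch of that assumed calling convention (illustrative names,
        // not the actual maps API):
        //
        //	ks := make([]K, 0, len(m))
        //	keys(m, unsafe.Pointer(&ks)) // ks now holds every key, starting at a randomized bucket
        //
        // values below follows the same convention for elements.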
  1636  
  1637  func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
  1638  	for b != nil {
  1639  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
  1640  			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
  1641  			if isEmpty(b.tophash[offi]) {
  1642  				continue
  1643  			}
  1644  			if h.flags&hashWriting != 0 {
  1645  				fatal("concurrent map read and map write")
  1646  			}
  1647  			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
  1648  			if t.IndirectKey() {
  1649  				k = *((*unsafe.Pointer)(k))
  1650  			}
  1651  			if s.len >= s.cap {
  1652  				fatal("concurrent map read and map write")
  1653  			}
  1654  			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
  1655  			s.len++
  1656  		}
  1657  		b = b.overflow(t)
  1658  	}
  1659  }
  1660  
  1661  // values for implementing maps.values
  1662  //
  1663  //go:linkname values maps.values
  1664  func values(m any, p unsafe.Pointer) {
  1665  	e := efaceOf(&m)
  1666  	t := (*maptype)(unsafe.Pointer(e._type))
  1667  	h := (*hmap)(e.data)
  1668  	if h == nil || h.count == 0 {
  1669  		return
  1670  	}
  1671  	s := (*slice)(p)
  1672  	r := int(rand())
  1673  	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
  1674  	if h.B == 0 {
  1675  		copyValues(t, h, (*bmap)(h.buckets), s, offset)
  1676  		return
  1677  	}
  1678  	arraySize := int(bucketShift(h.B))
  1679  	buckets := h.buckets
  1680  	for i := 0; i < arraySize; i++ {
  1681  		bucket := (i + r) & (arraySize - 1)
  1682  		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1683  		copyValues(t, h, b, s, offset)
  1684  	}
  1685  
  1686  	if h.growing() {
  1687  		oldArraySize := int(h.noldbuckets())
  1688  		for i := 0; i < oldArraySize; i++ {
  1689  			bucket := (i + r) & (oldArraySize - 1)
  1690  			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
  1691  			if evacuated(b) {
  1692  				continue
  1693  			}
  1694  			copyValues(t, h, b, s, offset)
  1695  		}
  1696  	}
  1697  	return
  1698  }
  1699  
  1700  func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
  1701  	for b != nil {
  1702  		for i := uintptr(0); i < abi.MapBucketCount; i++ {
  1703  			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
  1704  			if isEmpty(b.tophash[offi]) {
  1705  				continue
  1706  			}
  1707  
  1708  			if h.flags&hashWriting != 0 {
  1709  				fatal("concurrent map read and map write")
  1710  			}
  1711  
  1712  			ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
  1713  			if t.IndirectElem() {
  1714  				ele = *((*unsafe.Pointer)(ele))
  1715  			}
  1716  			if s.len >= s.cap {
  1717  				fatal("concurrent map read and map write")
  1718  			}
  1719  			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
  1720  			s.len++
  1721  		}
  1722  		b = b.overflow(t)
  1723  	}
  1724  }
  1725  
