// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking the load factor is a trade-off: too large and we
// have lots of overflow buckets, too small and we waste a lot
// of space. The constants below encode a maximum average load
// of 6.5 entries per 8-entry bucket, which measured well on
// overflow rate, bytes of overhead per entry, and probe counts
// for both present and absent keys.

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full).
	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for
	// special marks. Each bucket (including its overflow buckets, if any)
	// will have either all or none of its entries in the evacuated* states
	// (except during the evacuate() method, which only happens during map
	// writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
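
// Worked example of the constants above: with abi.MapBucketCount == 8,
// loadFactorNum/loadFactorDen == (2*8*13/16)/2 == 13/2 == 6.5, so growth
// triggers once a map averages more than 6.5 entries per 8-slot bucket,
// i.e. at 13/16 (about 81%) of capacity.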

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.MapBucketCount]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}
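
// Illustration (values chosen for the example, not used by the code):
// for B == 5, bucketShift(5) == 32 and bucketMask(5) == 31 == 0b11111,
// so hash&bucketMask(B) picks one of the 32 buckets using only the
// low-order bits of the hash.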

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
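
// Worked example (hypothetical 64-bit hash): tophash keeps hash>>56, the
// top byte. A top byte of 0xd6 is returned unchanged, while a top byte of
// 0x03 would collide with the evacuatedY sentinel and is therefore bumped
// to 0x03+minTopHash == 8.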

// evacuated reports whether b has already been evacuated to the new bucket
// array; tophash[0] holds an evacuation state when it is one of the
// evacuated* sentinels.
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns b's overflow bucket pointer, stored in the last word of the bucket.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand()&7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
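
// Rough sketch of how the compiler lowers map creation to the functions
// above (illustrative only; the exact lowering depends on the hint and on
// escape analysis):
//
//	m := make(map[string]int, 100) // roughly: makemap(maptype, 100, nil)
//	s := make(map[string]int)      // small: makemap_small, or a stack-allocated hmap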

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 is like mapaccess1, but also reports whether the key is present.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
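
// User-level sketch of the two lookup forms served by mapaccess1 and
// mapaccess2 (assumed lowering; specialized fast variants exist for some
// key types):
//
//	v := m[k]      // mapaccess1: always yields a value, possibly the zero value
//	v, ok := m[k]  // mapaccess2: ok reports whether k was present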

// mapaccessK returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
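
// User-level sketch (assumed lowering): the statement
//
//	m[k] = v
//
// compiles to roughly mapassign(maptype, m, &k) followed by a typed copy
// of v into the returned slot, which is why mapassign never sees the
// value being stored.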

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
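
// User-level sketch (assumed lowering):
//
//	delete(m, k) // roughly: mapdelete(maptype, m, &k); deleting an absent key is a no-op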

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.MapBucketCount; i++ {
		offi := (i + it.offset) & (abi.MapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start partway
			// through a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket in the old
			// buckets. We need to check that this entry is destined for the new
			// bucket we are iterating (checkBucket); entries headed elsewhere
			// will be returned when we reach their destination bucket.
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
// It is called by the compiler.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting

	// Mark buckets empty, so existing iterators can be terminated, see issue #59411.
	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
		for i := uintptr(0); i <= mask; i++ {
			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
			for ; b != nil; b = b.overflow(t) {
				for i := uintptr(0); i < abi.MapBucketCount; i++ {
					b.tophash[i] = emptyRest
				}
			}
		}
	}
	markBucketsEmpty(h.buckets, bucketMask(h.B))
	if oldBuckets := h.oldbuckets; oldBuckets != nil {
		markBucketsEmpty(oldBuckets, h.oldbucketmask())
	}

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
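
// Worked example (hypothetical numbers): with B == 5 there are 32 buckets,
// so overLoadFactor reports true once count exceeds 13*(32/2) == 208, an
// average of 6.5 entries per bucket. Maps with at most abi.MapBucketCount
// entries never trigger growth, regardless of B.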

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to make sure.
	return noverflow >= uint16(1)<<(B&15)
}
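
// Worked example (hypothetical numbers): for B == 5 the threshold is
// 1<<5 == 32 overflow buckets, roughly one per regular bucket. For B > 15
// the threshold is capped at 1<<15, matching the approximate counting done
// in incrnoverflow.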

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > abi.MapBucketCount {
		throw("key align too big")
	}
	if t.Elem.Align_ > abi.MapBucketCount {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if abi.MapBucketCount < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

var zeroVal [abi.ZeroValSize]byte

// mapinitnoop is a synthetic function created by the compiler for use in
// global map initialization; it is an empty function (its body is provided
// in assembly) and intentionally does nothing.
func mapinitnoop()

// mapclone for implementing maps.Clone
//
//go:linkname mapclone maps.clone
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}
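
// Usage sketch (assumed caller): mapclone is reached via its linkname from
// the maps package, so user code sees it as
//
//	m2 := maps.Clone(m1) // shallow clone: fresh buckets, same key/elem values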

// moveToBmap moves entries from src into dst, starting at slot pos in dst.
// It returns the destination bucket (a new one if dst overflowed) and the
// position at which the next key/elem should be written.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < abi.MapBucketCount; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < abi.MapBucketCount; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == abi.MapBucketCount {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			srcK = *(*unsafe.Pointer)(srcK)
			if t.NeedKeyUpdate() {
				kStore := newobject(t.Key)
				typedmemmove(t.Key, kStore, srcK)
				srcK = kStore
			}
			// Note: if NeedKeyUpdate is false, then the memory
			// used to store the key is immutable, so we can share
			// it between the original map and its clone.
			*(*unsafe.Pointer)(dstK) = srcK
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			srcEle = *(*unsafe.Pointer)(srcEle)
			eStore := newobject(t.Elem)
			typedmemmove(t.Elem, eStore, srcEle)
			*(*unsafe.Pointer)(dstEle) = eStore
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	dst := makemap(t, src.count, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied here, just like a new map has no flags.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
		// Quick copy for small maps.
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B { // main bucket bits in dst are fewer than oldB bits in src
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so a single source bucket may go to multiple
		// destination buckets. Process entries one at a time.
		for srcBmap != nil {
			// move from old bucket to new bucket
			for i := uintptr(0); i < abi.MapBucketCount; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}

// keys for implementing maps.keys
//
//go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values for implementing maps.values
//
//go:linkname values maps.values
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}