Source file: src/runtime/map_noswiss.go
package runtime

// This file contains the implementation of Go's old (pre-Swiss-table) map type.
//
// A map is just a hash table. The data is arranged into an array of buckets.
// Each bucket contains up to abi.OldMapBucketCount (8) key/elem pairs. The
// low-order bits of the hash are used to select a bucket. Each bucket stores a
// few high-order bits of each hash (the tophash) to distinguish the entries
// within a single bucket. If more than 8 keys hash to a bucket, extra buckets
// are chained on in overflow buckets.
//
// When the hash table grows, a new bucket array twice as big is allocated and
// buckets are incrementally copied ("evacuated") from the old array to the new
// array as map writes touch them. Map iterators walk the array of buckets; to
// keep iteration semantics during a grow, they consult the old bucket for
// entries that have not yet been evacuated.
import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/runtime/maps"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

type maptype = abi.OldMapType
const (
	// bucketCntBits is the log2 of the number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.OldMapBucketCountBits

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 81% full).
	// Represented as loadFactorNum/loadFactorDen to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. A few values are reserved for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
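
// Illustrative arithmetic for the constants above: loadFactorNum = 2*8*13/16 = 13
// and loadFactorDen = 2, i.e. an average load of 13/2 = 6.5 entries per 8-slot
// bucket. With B = 5 (32 buckets), for example, a map is considered over the
// load factor once it would hold more than 6.5*32 = 208 entries (see
// overLoadFactor below).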

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int    // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
	clearSeq   uint64

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.OldMapBucketCount]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end.
	elem        unsafe.Pointer // Must be in second position.
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
	clearSeq    uint64
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
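
// For example, on a 64-bit platform tophash keeps only the top 8 bits of the
// 64-bit hash. Hashes whose top byte happens to fall below minTopHash (0..4)
// are bumped up by minTopHash so that stored tophash values never collide with
// the special marker values (emptyRest, emptyOne, evacuated*) defined above.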

// evacuated reports whether b has been evacuated (its tophash[0] holds an evacuation state).
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns b's overflow bucket pointer, stored in the last word of the bucket.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}
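
// To make the approximate counting concrete: for a map with h.B == 20 (about a
// million buckets), mask == (1<<5)-1 == 31, so noverflow is incremented on
// roughly 1 in 32 overflow-bucket allocations and therefore underestimates the
// true count by about that factor, which is all the precision
// tooManyOverflowBuckets needs.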

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
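
// A quick sizing example for the loop above: make(map[K]V, 100) keeps calling
// overLoadFactor(100, B), which stays true while 100 > 6.5*2^B, so B settles
// at 4 and the map starts with 16 buckets (16 * 6.5 = 104 >= 100).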

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
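
// For instance, with b == 6 the base array holds 1<<6 = 64 buckets and
// 1<<(6-4) = 4 extra overflow buckets are appended (plus whatever the size
// class rounding in roundupsize yields). Those extras are handed out one at a
// time by newoverflow via h.extra.nextOverflow before any overflow bucket is
// heap-allocated separately.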

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 is like mapaccess1, but also returns a boolean reporting
// whether the key is present in the map.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err)
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccessK returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.
	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}

// mapdelete removes the entry for key from the map, if present.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := maps.OldMapKeyError(t, key); err != nil {
			panic(err)
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.OldMapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.OldMapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
		throw("hash_iter size incorrect")
	}
	it.h = h
	it.clearSeq = h.clearSeq

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

// mapiternext advances it to the next map entry, setting it.key and it.elem;
// it sets it.key = nil when iteration is complete.
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.OldMapBucketCount; i++ {
		offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start partway through
			// a bucket. It's feasible, just annoying.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. So we're iterating through
			// the oldbucket, skipping any keys that will go to the other new
			// bucket (each oldbucket expands to two buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if it.clearSeq == h.clearSeq &&
			((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
				!(t.ReflexiveKey() || t.Key.Equal(k, k))) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key has been deleted,
			// updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting
	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0
	h.clearSeq++

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
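
// Concretely: for a map with B == 8 (256 buckets), tooManyOverflowBuckets
// returns true once noverflow reaches 1<<8 == 256, i.e. roughly one overflow
// bucket per regular bucket, at which point a same-size grow is used to
// compact entries back into the primary buckets.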

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// sameSizeGrowForIssue69110Test exposes sameSizeGrow to the test for go.dev/issue/69110.
func sameSizeGrowForIssue69110Test(h *hmap) bool {
	return h.sameSizeGrow()
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
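
// For example, while a map is doubling from 16 to 32 buckets (h.B already
// bumped to 5), noldbuckets() returns 16 and oldbucketmask() returns 15, so
// bucket&h.oldbucketmask() maps a new-table bucket index back to the old
// bucket it is evacuated from.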

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.OldMapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2
				} else {
					typedmemmove(t.Key, dst.k, k)
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
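
// To illustrate the X/Y split above: when a map doubles from 8 to 16 buckets,
// newbit == 8, and an entry in old bucket 3 is sent to new bucket 3
// (destination X) if hash&newbit == 0, or to new bucket 3+8 == 11
// (destination Y) otherwise. During a same-size grow there is no Y destination
// and every entry stays at the same bucket index.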

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// reflect_makemap creates a map for package reflect, checking the layout
// invariants that the runtime relies on.
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants we rely on.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > abi.OldMapBucketCount {
		throw("key align too big")
	}
	if t.Elem.Align_ > abi.OldMapBucketCount {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if abi.OldMapBucketCount < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

// reflect_mapaccess returns a pointer to h[key] for package reflect, or nil
// if the key is not present.
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

// reflect_mapassign sets h[key] = elem for package reflect.
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

// reflect_mapiterinit initializes the map iterator it for package reflect.
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

// reflect_mapiternext advances the map iterator for package reflect.
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

// reflect_mapiterkey returns the key of the iterator's current entry.
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

// reflect_mapiterelem returns the elem of the iterator's current entry.
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

// reflect_maplen returns the number of entries in h for package reflect.
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

// reflectlite_maplen is like reflect_maplen, for internal/reflectlite.
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := sys.GetCallerPC()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

// mapinitnoop is a no-op function known to the Go linker; if a given global
// map (of the form map[x]y = {}) is actually empty, the linker rewrites the
// relocation for the otherwise-useless map init function to point here
// instead. It has no Go body; it is defined in assembly.
func mapinitnoop()

// mapclone is used to implement maps.Clone.
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}

// moveToBmap copies the entries of src into dst. It returns the (possibly new,
// if dst overflowed) destination bucket and the position at which the next
// key/elem will be written; pos == bucketCnt means the next write needs an
// overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < abi.OldMapBucketCount; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < abi.OldMapBucketCount; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == abi.OldMapBucketCount {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			srcK = *(*unsafe.Pointer)(srcK)
			if t.NeedKeyUpdate() {
				kStore := newobject(t.Key)
				typedmemmove(t.Key, kStore, srcK)
				srcK = kStore
			}
			// Note: if NeedKeyUpdate is false, then the memory
			// pointed to by srcK is immutable, so we can share it.
			*(*unsafe.Pointer)(dstK) = srcK
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			srcEle = *(*unsafe.Pointer)(srcEle)
			eStore := newobject(t.Elem)
			typedmemmove(t.Elem, eStore, srcEle)
			*(*unsafe.Pointer)(dstEle) = eStore
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	hint := src.count
	if overLoadFactor(hint, src.B) {
		// Note: in rare cases (e.g. during map growth) src can hold more keys
		// than its load factor allows. Cap the hint so dst is not allocated
		// with more buckets than src.
		hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
	}
	dst := makemap(t, hint, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied here, just like a new map has no flags.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
		// Quick copy for small maps.
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B {
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so a single source bucket may spread across multiple
		// destination buckets. Process entries one at a time.
		for srcBmap != nil {
			for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}

// keys copies all keys in m into the slice pointed to by p. It is used by the
// maps package.
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values copies all values in m into the slice pointed to by p. It is used by
// the maps package.
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}