5 package maps
6
7 import (
8 "internal/abi"
9 "internal/runtime/math"
10 "unsafe"
11 )
12
13
14
15
16
17
18
// maxTableCapacity is the maximum number of slots a single table may
// hold. Maps needing more capacity are built from multiple tables behind
// a directory (see split/installTableSplit below).
const maxTableCapacity = 1024

// Compile-time assertion that maxTableCapacity fits in uint16, since
// table.capacity and table.growthLeft below are uint16.
var _ = uint16(maxTableCapacity)
24
25
26
27
28
29
30
31
32
// table is a single open-addressed hash table, organized as groups of
// slots with a control word per group (Swiss-table layout). A Map is one
// or more tables reached through a directory.
type table struct {
	// used is the number of filled (in-use) slots in the table.
	used uint16

	// capacity is the total number of slots in the table. It is always
	// a power of two (rounded up via alignUpPow2 in newTable), at least
	// abi.MapGroupSlots and at most maxTableCapacity.
	capacity uint16

	// growthLeft is the number of insertions that may still be performed
	// before the table must grow or split (see PutSlot/rehash). It is
	// decremented on insert and refunded when tombstones are reclaimed.
	growthLeft uint16

	// localDepth is the number of high hash bits this table's directory
	// placement depends on (see localDepthMask and split).
	localDepth uint8

	// index is this table's first index in the map directory.
	// Set to -1 once the table has been replaced by grow or split,
	// which iterators use to detect a stale table.
	index int

	// groups is the backing storage for the table's groups of slots.
	// groups.lengthMask is (capacity / abi.MapGroupSlots) - 1, used to
	// wrap probe-sequence offsets.
	groups groupsReference
}
72
// newTable allocates a table with room for at least capacity slots,
// recording its directory index and local depth. The requested capacity
// is clamped up to one full group and rounded up to a power of two; it
// must not exceed maxTableCapacity.
func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
	// Never allocate fewer slots than a single group.
	if capacity < abi.MapGroupSlots {
		capacity = abi.MapGroupSlots
	}

	t := &table{
		index:      index,
		localDepth: localDepth,
	}

	if capacity > maxTableCapacity {
		panic("initial table capacity too large")
	}

	// Round up to a power of two so probe sequences can use mask
	// arithmetic (see makeProbeSeq).
	capacity, overflow := alignUpPow2(capacity)
	if overflow {
		panic("rounded-up capacity overflows uint64")
	}

	t.reset(typ, uint16(capacity))

	return t
}
98
99
100
// reset resets the table to empty with freshly allocated groups at the
// given total capacity, restoring the full growth budget and marking
// every slot empty.
func (t *table) reset(typ *abi.MapType, capacity uint16) {
	groupCount := uint64(capacity) / abi.MapGroupSlots
	t.groups = newGroups(typ, groupCount)
	t.capacity = capacity
	t.growthLeft = t.maxGrowthLeft()

	// Initialize every group's control word to all-empty.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		g.ctrls().setEmpty()
	}
}
112
113
114
115 func (t *table) maxGrowthLeft() uint16 {
116 if t.capacity == 0 {
117
118
119 panic("table must have positive capacity")
120 } else if t.capacity <= abi.MapGroupSlots {
121
122
123
124
125
126
127 return t.capacity - 1
128 } else {
129 if t.capacity > math.MaxUint16/maxAvgGroupLoad {
130 panic("overflow")
131 }
132 return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
133 }
134
135 }
136
137 func (t *table) Used() uint64 {
138 return uint64(t.used)
139 }
140
141
142
// Get performs a lookup of the key that key points to, returning a
// pointer to the element and whether the key was found.
func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	// Hash with the map's seed and delegate to getWithKey, discarding
	// the returned key pointer.
	hash := typ.Hasher(key, m.seed)
	_, elem, ok := t.getWithKey(typ, hash, key)
	return elem, ok
}
154
155
156
157
158
159
160
161
162
163
// getWithKey performs a lookup of key, returning a pointer to the version
// of the key stored in the map, a pointer to the element, and whether the
// key was found.
//
// The probe sequence starts at the group selected by h1(hash) and visits
// further groups per probeSeq. Within each group, control bytes matching
// h2(hash) mark candidate slots, which are confirmed by a full key
// comparison. A group containing an empty slot terminates the search:
// insertion would have used that empty slot rather than probing further,
// so the key cannot exist beyond it.
func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Key is stored behind a pointer; follow it.
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotKey, slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot in this group: the key is not in the table.
			return nil, nil, false
		}
	}
}
224
// getWithoutKey performs a lookup of key, returning a pointer to the
// element and whether it was found. Identical to getWithKey except the
// stored key is not returned.
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot terminates the probe sequence: key absent.
			return nil, false
		}
	}
}
258
259
260
261
262
263
264
265
// PutSlot returns a pointer to the element slot where an inserted element
// should be written, plus true on success. It returns (nil, false) if the
// table had to be rehashed (grown or split); the caller must then retry
// the operation against the map's new table layout.
//
// hash must be the hash of key under m.seed.
func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	// First tombstone (deleted slot) seen along the probe sequence, if
	// any. If the key turns out not to be present, it is inserted there
	// instead of at the end of the chain, reclaiming the tombstone.
	var firstDeletedGroup groupReference
	var firstDeletedSlot uintptr

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		// Look for an existing slot holding this key.
		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				// Key already present. Some key types need the stored
				// key refreshed on assignment (NeedKeyUpdate).
				if typ.NeedKeyUpdate() {
					typedmemmove(typ.Key, slotKey, key)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}

				t.checkInvariants(typ, m)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		// No match in this group. If the group is completely full,
		// continue probing; otherwise inspect the first non-full slot.
		match = g.ctrls().matchEmptyOrDeleted()
		if match == 0 {
			continue
		}
		i := match.first()
		if g.ctrls().get(i) == ctrlDeleted {
			// Tombstone: remember the first one seen, but keep probing;
			// the key may still exist further along the chain.
			if firstDeletedGroup.data == nil {
				firstDeletedGroup = g
				firstDeletedSlot = i
			}
			continue
		}

		// Empty slot: the key is definitely not in the table. Prefer
		// reusing an earlier tombstone for the insert.
		if firstDeletedGroup.data != nil {
			g = firstDeletedGroup
			i = firstDeletedSlot
			// Reusing a tombstone does not consume growth budget;
			// this increment offsets the decrement below.
			t.growthLeft++
		}

		// Out of budget: try to reclaim tombstones before resorting to
		// a full rehash.
		if t.growthLeft == 0 {
			t.pruneTombstones(typ, m)
		}

		// If there is room left, fill the chosen slot.
		if t.growthLeft > 0 {
			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Indirect key: allocate backing storage and store a
				// pointer in the slot.
				kmem := newobject(typ.Key)
				*(*unsafe.Pointer)(slotKey) = kmem
				slotKey = kmem
			}
			typedmemmove(typ.Key, slotKey, key)

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				emem := newobject(typ.Elem)
				*(*unsafe.Pointer)(slotElem) = emem
				slotElem = emem
			}

			g.ctrls().set(i, ctrl(h2Hash))
			t.growthLeft--
			t.used++
			m.used++

			t.checkInvariants(typ, m)
			return slotElem, true
		}

		// No room: grow or split, then have the caller retry.
		t.rehash(typ, m)
		return nil, false
	}
}
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
// uncheckedPutSlot inserts key and elem into the table, assuming the key
// is not already present and that the table has room (growthLeft > 0).
// Unlike PutSlot it never compares keys and never triggers a rehash.
//
// Used when rebuilding tables (grow/split), where every moved key is
// known to be unique and the hash has already been computed.
func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
	if t.growthLeft == 0 {
		panic("invariant failed: growthLeft is unexpectedly 0")
	}

	// Since the key cannot already exist, simply take the first empty
	// or deleted slot along the probe sequence.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchEmptyOrDeleted()
		if match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if typ.IndirectKey() {
				// Indirect key: key already points at heap storage;
				// store the pointer itself.
				*(*unsafe.Pointer)(slotKey) = key
			} else {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				*(*unsafe.Pointer)(slotElem) = elem
			} else {
				typedmemmove(typ.Elem, slotElem, elem)
			}

			t.growthLeft--
			t.used++
			g.ctrls().set(i, ctrl(h2(hash)))
			return
		}
	}
}
420
421
// Delete removes key from the table, if present. It reports whether a
// tombstone (deleted control byte) was left behind.
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)
		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot address so an indirect key's pointer can be
			// cleared below.
			origSlotKey := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}

			if typ.Key.Equal(key, slotKey) {
				t.used--
				m.used--

				if typ.IndirectKey() {
					// Clear the stored pointer so the key can be GC'd.
					*(*unsafe.Pointer)(origSlotKey) = nil
				} else if typ.Key.Pointers() {
					// Only clear inline keys that contain pointers;
					// pointer-free data can be left in place.
					typedmemclr(typ.Key, slotKey)
				}

				slotElem := g.elem(typ, i)
				if typ.IndirectElem() {
					// Clear the stored pointer so the elem can be GC'd.
					*(*unsafe.Pointer)(slotElem) = nil
				} else {
					// NOTE(review): elems are cleared unconditionally,
					// unlike keys — presumably so stale values are never
					// observable in a reused slot; confirm against
					// callers before relying on the distinction.
					typedmemclr(typ.Elem, slotElem)
				}

				// Decide between marking the slot empty or deleted. If
				// this group has at least one empty slot, every probe
				// sequence already terminates at this group, so the
				// freed slot may become empty and the growth budget is
				// refunded. Otherwise a tombstone must be left so
				// longer probe chains continue past this group.
				var tombstone bool
				if g.ctrls().matchEmpty() != 0 {
					g.ctrls().set(i, ctrlEmpty)
					t.growthLeft++
				} else {
					g.ctrls().set(i, ctrlDeleted)
					tombstone = true
				}

				t.checkInvariants(typ, m)
				return tombstone
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Empty slot terminates the probe sequence: key absent,
			// nothing to delete.
			return false
		}
	}
}
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
// pruneTombstones scans the table and converts deleted (tombstone) slots
// back to empty where it is safe, recovering insertion capacity without a
// full rehash.
//
// A tombstone in a group may only become empty if no key's lookup probe
// sequence depends on that group staying "non-terminating": an empty slot
// ends a probe sequence, so prematurely emptying a group on some key's
// path would make that key unfindable.
func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
	if t.tombstones()*10 < t.capacity { // tombstones below 10% of capacity
		// Too few tombstones to be worth the full scan.
		return
	}

	// Bit i set: group i's empty/deleted slots must be preserved as-is.
	var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32

	// For every stored key, walk its probe sequence up to (but not
	// including) the group it currently occupies. Any earlier group
	// with empty-or-deleted slots lies on the key's lookup path and is
	// marked needed.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		match := g.ctrls().matchFull()
		for match != 0 {
			j := match.first()
			match = match.removeFirst()
			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}
			if !typ.Key.Equal(key, key) {
				// Keys that are not equal to themselves (e.g. NaN)
				// can never be found by lookup anyway, so they place
				// no constraint on any group.
				continue
			}

			// Re-derive this key's probe path from its hash.
			hash := typ.Hasher(key, m.seed)
			for seq := makeProbeSeq(h1(hash), t.groups.lengthMask); ; seq = seq.next() {
				if seq.offset == i {
					break
				}
				g := t.groups.group(typ, seq.offset)
				m := g.ctrls().matchEmptyOrDeleted()
				if m != 0 {
					needed[seq.offset/32] |= 1 << (seq.offset % 32)
				}
			}
		}
		if g.ctrls().matchEmpty() != 0 {
			// NOTE(review): groups that already contain an empty slot
			// are excluded from conversion below — presumably their
			// tombstones were already reclaimed opportunistically by
			// Delete, so there is nothing to gain; confirm.
			needed[i/32] |= 1 << (i % 32)
		}
	}

	// Count how many slots conversion would recover; bail out if it
	// would free fewer than 10% of capacity.
	cnt := 0
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		cnt += m.count()
	}
	if cnt*10 < int(t.capacity) {
		return
	}

	// Convert tombstones in every non-needed group back to empty,
	// refunding the growth budget for each reclaimed slot.
	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		if needed[i/32]>>(i%32)&1 != 0 {
			continue
		}
		g := t.groups.group(typ, i)
		m := g.ctrls().matchEmptyOrDeleted()
		for m != 0 {
			k := m.first()
			m = m.removeFirst()
			g.ctrls().set(k, ctrlEmpty)
			t.growthLeft++
		}
	}
}
591
592
593
594
595 func (t *table) tombstones() uint16 {
596 return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
597 }
598
599
// Clear deletes all entries from the table, keeping the allocated group
// storage for reuse.
func (t *table) Clear(typ *abi.MapType) {
	mgl := t.maxGrowthLeft()
	if t.used == 0 && t.growthLeft == mgl {
		// Already pristine: no entries and no tombstones consuming the
		// growth budget. Nothing to do.
		return
	}

	// Two clearing strategies:
	//   - fullTest: check each group and memclr only those containing
	//     full slots. Chosen when the table is sparse (used*4 is at
	//     most the group-count mask) or when slots are large
	//     (SlotSize > 32), where unconditional clearing is expensive.
	//   - otherwise: unconditionally memclr every group.
	fullTest := uint64(t.used)*4 <= t.groups.lengthMask
	if typ.SlotSize > 32 {
		fullTest = true
	}
	if fullTest {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			if g.ctrls().anyFull() {
				typedmemclr(typ.Group, g.data)
			}
			// Control words are reset regardless, clearing tombstones.
			g.ctrls().setEmpty()
		}
	} else {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			typedmemclr(typ.Group, g.data)
			g.ctrls().setEmpty()
		}
	}
	t.used = 0
	t.growthLeft = mgl
}
640
// Iter holds the state of an in-progress map iteration. Init must be
// called before the first Next.
type Iter struct {
	key  unsafe.Pointer // current key; nil when iteration is complete
	elem unsafe.Pointer // current elem; nil when iteration is complete
	typ  *abi.MapType
	m    *Map

	// Random offsets applied to entry and directory indices so that
	// iteration order is randomized (set from rand() in Init).
	entryOffset uint64
	dirOffset   uint64

	// clearSeq is m.clearSeq captured at Init, used to detect whether
	// the map has been cleared during iteration (see grownKeyElem).
	clearSeq uint64

	// globalDepth is m.globalDepth as of the previous Next call, used
	// to detect directory growth between calls.
	globalDepth uint8

	// dirIdx is the current directory index, prior to adjustment by
	// dirOffset. -1 selects the small-map (no directory) path in Next.
	dirIdx int

	// tab is the table being iterated; nil means the next directory
	// entry must be loaded.
	tab *table

	// group is the group currently being iterated within tab (or the
	// single small-map group).
	group groupReference

	// entryIdx is the current entry index within tab, prior to
	// adjustment by entryOffset. The low bits select the slot within a
	// group, the high bits select the group.
	entryIdx uint64
}
676
677
// Init initializes Iter for iteration over m. m may be nil; iterating a
// nil or empty map yields no entries (it.m stays nil, so Next returns
// immediately).
func (it *Iter) Init(typ *abi.MapType, m *Map) {
	it.typ = typ

	if m == nil || m.used == 0 {
		return
	}

	dirIdx := 0
	var groupSmall groupReference
	if m.dirLen <= 0 {
		// Small map with no directory: iterate its single group
		// directly. dirIdx == -1 selects the small-map path in Next.
		dirIdx = -1
		groupSmall.data = m.dirPtr
	}

	it.m = m
	// Randomize iteration order.
	it.entryOffset = rand()
	it.dirOffset = rand()
	it.globalDepth = m.globalDepth
	it.dirIdx = dirIdx
	it.group = groupSmall
	it.clearSeq = m.clearSeq
}
701
702 func (it *Iter) Initialized() bool {
703 return it.typ != nil
704 }
705
706
707 func (it *Iter) Map() *Map {
708 return it.m
709 }
710
711
712
713
714 func (it *Iter) Key() unsafe.Pointer {
715 return it.key
716 }
717
718
719
720
721
722 func (it *Iter) Elem() unsafe.Pointer {
723 return it.elem
724 }
725
// nextDirIdx advances it.dirIdx past every directory entry aliasing the
// current table, and resets the per-table iteration state. A table with
// localDepth d occupies 1 << (globalDepth - d) consecutive directory
// entries, all pointing at the same table; skipping them as a unit
// ensures each table is visited exactly once.
func (it *Iter) nextDirIdx() {
	entries := 1 << (it.m.globalDepth - it.tab.localDepth)
	it.dirIdx += entries
	// Force the next loop iteration to reload the table and group and
	// restart the entry scan.
	it.tab = nil
	it.group = groupReference{}
	it.entryIdx = 0
}
759
760
761
// grownKeyElem re-looks-up key in the map and returns the current
// key/elem pair. It is called when the table being iterated has been
// replaced (grown or split) since iteration began, so the entry may have
// moved to a new table or been deleted; ok reports whether the entry
// should still be yielded.
func (it *Iter) grownKeyElem(key unsafe.Pointer, slotIdx uintptr) (unsafe.Pointer, unsafe.Pointer, bool) {
	newKey, newElem, ok := it.m.getWithKey(it.typ, key)
	if !ok {
		// Lookup failed. Normally that means the entry was deleted,
		// but keys that do not compare equal to themselves (e.g. NaN
		// float keys) can never be found by lookup at all. If the map
		// has not been cleared since iteration began, such an entry is
		// still live, so yield the stale slot's key and elem directly.
		if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
			elem := it.group.elem(it.typ, slotIdx)
			if it.typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}
			return key, elem, true
		}

		// Deleted (or map cleared): skip this entry.
		return nil, nil, false
	}

	return newKey, newElem, true
}
802
803
804
805
806
807
808
809
// Next proceeds to the next element in iteration; the result is read via
// the Key and Elem methods. A nil Key indicates iteration is complete.
//
// The map may be mutated between calls, though mutations are not
// guaranteed to be visible to the iteration. Init must be called before
// the first Next.
func (it *Iter) Next() {
	if it.m == nil {
		// Map was empty (or nil) at Init; iteration yields nothing.
		it.key = nil
		it.elem = nil
		return
	}

	if it.m.writing != 0 {
		fatal("concurrent map iteration and map write")
		return
	}

	if it.dirIdx < 0 {
		// Small-map path: a single group, no directory. Scan the
		// group's slots starting at the random entryOffset.
		for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
			k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots

			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
				// Empty or deleted slot.
				continue
			}

			key := it.group.key(it.typ, k)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// If the map has grown a directory since Init, the entry
			// may have moved; re-look it up in the full map.
			grown := it.m.dirLen > 0
			var elem unsafe.Pointer
			if grown {
				var ok bool
				newKey, newElem, ok := it.m.getWithKey(it.typ, key)
				if !ok {
					// Not found: either deleted, or a self-unequal key
					// (e.g. NaN) that lookup can never find. Yield the
					// stale slot contents for the latter, unless the
					// map was cleared.
					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) {
						elem = it.group.elem(it.typ, k)
						if it.typ.IndirectElem() {
							elem = *((*unsafe.Pointer)(elem))
						}
					} else {
						continue
					}
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, k)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}
		// Exhausted the single group.
		it.key = nil
		it.elem = nil
		return
	}

	if it.globalDepth != it.m.globalDepth {
		// The directory has grown since the last call: each old entry
		// now corresponds to 2^orders new entries. Scale the iterator's
		// directory position and offset into the new index space.
		orders := it.m.globalDepth - it.globalDepth
		it.dirIdx <<= orders
		it.dirOffset <<= orders

		it.globalDepth = it.m.globalDepth
	}

	// Continue iteration until every table in the directory is done.
	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() {
		// Load the next table, if we don't have one yet.
		if it.tab == nil {
			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1))
			newTab := it.m.directoryAt(uintptr(dirIdx))
			if newTab.index != dirIdx {
				// The table spans multiple directory entries and we
				// landed mid-span; snap to its first entry and adjust
				// dirOffset so subsequent arithmetic stays consistent.
				diff := dirIdx - newTab.index
				it.dirOffset -= uint64(diff)
				dirIdx = newTab.index
			}
			it.tab = newTab
		}

		// entryMask wraps entry indices within this table's capacity
		// (capacity is a power of two).
		entryMask := uint64(it.tab.capacity) - 1
		if it.entryIdx > entryMask {
			// This table is exhausted; move to the next directory span.
			continue
		}

		// Fast path: check the single slot at the current (randomized)
		// entry index.
		entryIdx := (it.entryIdx + it.entryOffset) & entryMask
		slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
		if slotIdx == 0 || it.group.data == nil {
			// Crossed into a new group (or first access): load it.
			groupIdx := entryIdx >> abi.MapGroupSlotsBits
			it.group = it.tab.groups.group(it.typ, groupIdx)
		}

		if (it.group.ctrls().get(slotIdx) & ctrlEmpty) == 0 {
			// Slot is full; yield it.
			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// index == -1 means this table was replaced (grew/split)
			// since we loaded it; re-look-up the entry.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Entry deleted; fall through to the batched scan.
					goto next
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			it.entryIdx++
			it.key = key
			it.elem = elem
			return
		}

	next:
		it.entryIdx++

		// Batched scan: use the group control word to jump directly
		// between full slots instead of probing one slot at a time.
		// groupMatch caches the set of full slots at or after slotIdx
		// in the current group.
		var groupMatch bitset
		for it.entryIdx <= entryMask {
			entryIdx := (it.entryIdx + it.entryOffset) & entryMask
			slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))

			if slotIdx == 0 || it.group.data == nil {
				// Crossed into a new group: load it.
				groupIdx := entryIdx >> abi.MapGroupSlotsBits
				it.group = it.tab.groups.group(it.typ, groupIdx)
			}

			if groupMatch == 0 {
				groupMatch = it.group.ctrls().matchFull()

				if slotIdx != 0 {
					// Mid-group entry: slots below slotIdx were already
					// visited on an earlier wrap-around pass.
					groupMatch = groupMatch.removeBelow(slotIdx)
				}

				if groupMatch == 0 {
					// No full slots remain in this group; skip to the
					// next group boundary.
					it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
					continue
				}

				// Jump to the first full slot.
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
				if it.entryIdx > entryMask {
					// Jumped past the end of the table.
					continue
				}
				entryIdx += uint64(i - slotIdx)
				slotIdx = i
			}

			key := it.group.key(it.typ, slotIdx)
			if it.typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			// As above: a replaced table requires a fresh lookup.
			grown := it.tab.index == -1
			var elem unsafe.Pointer
			if grown {
				newKey, newElem, ok := it.grownKeyElem(key, slotIdx)
				if !ok {
					// Entry deleted; advance to the next full slot in
					// this group (or the next group).
					groupMatch = groupMatch.removeFirst()
					if groupMatch == 0 {
						it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
						continue
					}
					i := groupMatch.first()
					it.entryIdx += uint64(i - slotIdx)
					continue
				} else {
					key = newKey
					elem = newElem
				}
			} else {
				elem = it.group.elem(it.typ, slotIdx)
				if it.typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}
			}

			// Position entryIdx at the next full slot (or the next
			// group boundary) before returning this entry.
			groupMatch = groupMatch.removeFirst()
			if groupMatch == 0 {
				it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
			} else {
				i := groupMatch.first()
				it.entryIdx += uint64(i - slotIdx)
			}

			it.key = key
			it.elem = elem
			return
		}
		// Table exhausted; the loop post-statement (nextDirIdx) moves
		// on to the next table.
	}

	// All tables exhausted: iteration complete.
	it.key = nil
	it.elem = nil
	return
}
1141
1142
1143
1144
1145 func (t *table) rehash(typ *abi.MapType, m *Map) {
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161 newCapacity := 2 * t.capacity
1162 if newCapacity <= maxTableCapacity {
1163 t.grow(typ, m, newCapacity)
1164 return
1165 }
1166
1167 t.split(typ, m)
1168 }
1169
1170
1171 func localDepthMask(localDepth uint8) uintptr {
1172 if !Use64BitHash {
1173 return uintptr(1) << (32 - localDepth)
1174 }
1175 return uintptr(1) << (64 - localDepth)
1176 }
1177
1178
// split divides the table's entries between two new tables at one greater
// local depth and installs them in the map directory, routing each entry
// by the hash bit at the new depth.
func (t *table) split(typ *abi.MapType, m *Map) {
	localDepth := t.localDepth
	localDepth++

	// Both halves get full capacity; index -1 marks them as not yet
	// installed in the directory.
	left := newTable(typ, maxTableCapacity, -1, localDepth)
	right := newTable(typ, maxTableCapacity, -1, localDepth)

	// The hash bit that decides left vs right at the new depth.
	mask := localDepthMask(localDepth)

	for i := uint64(0); i <= t.groups.lengthMask; i++ {
		g := t.groups.group(typ, i)
		for j := uintptr(0); j < abi.MapGroupSlots; j++ {
			if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
				// Empty or deleted slot; nothing to move.
				continue
			}

			key := g.key(typ, j)
			if typ.IndirectKey() {
				key = *((*unsafe.Pointer)(key))
			}

			elem := g.elem(typ, j)
			if typ.IndirectElem() {
				elem = *((*unsafe.Pointer)(elem))
			}

			hash := typ.Hasher(key, m.seed)
			var newTable *table
			if hash&mask == 0 {
				newTable = left
			} else {
				newTable = right
			}
			// Keys are known unique and the new tables have room, so
			// skip the duplicate check.
			newTable.uncheckedPutSlot(typ, hash, key, elem)
		}
	}

	m.installTableSplit(t, left, right)
	// Mark the old table as no longer in the directory.
	t.index = -1
}
1222
1223
1224
1225
1226
// grow allocates a new table at newCapacity (same directory index and
// local depth), moves every entry of t into it, and replaces t in the
// map directory.
func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)

	if t.capacity > 0 {
		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			for j := uintptr(0); j < abi.MapGroupSlots; j++ {
				if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
					// Empty or deleted slot; nothing to move.
					continue
				}

				key := g.key(typ, j)
				if typ.IndirectKey() {
					key = *((*unsafe.Pointer)(key))
				}

				elem := g.elem(typ, j)
				if typ.IndirectElem() {
					elem = *((*unsafe.Pointer)(elem))
				}

				hash := typ.Hasher(key, m.seed)

				// Keys are known unique and the new table has room, so
				// skip the duplicate check.
				newTable.uncheckedPutSlot(typ, hash, key, elem)
			}
		}
	}

	newTable.checkInvariants(typ, m)
	m.replaceTable(newTable)
	// Mark the old table as no longer in the directory.
	t.index = -1
}
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
// probeSeq generates the sequence of group offsets visited when probing
// the table for a hash. Starting at hash & mask, each step's increment
// grows by one (a triangular-number sequence); when mask+1 is a power of
// two this visits every offset exactly once before repeating.
type probeSeq struct {
	mask   uint64
	offset uint64
	index  uint64
}

// makeProbeSeq starts a probe sequence for hash over a group count of
// mask+1 (which must be a power of two).
func makeProbeSeq(hash uintptr, mask uint64) probeSeq {
	start := uint64(hash) & mask
	return probeSeq{mask: mask, offset: start}
}

// next returns the sequence advanced by one step.
func (p probeSeq) next() probeSeq {
	step := p.index + 1
	return probeSeq{
		mask:   p.mask,
		offset: (p.offset + step) & p.mask,
		index:  step,
	}
}
1291
// clone returns a deep copy of the table: the scalar fields are copied
// and the group storage is duplicated, so mutations of the clone do not
// affect the original.
func (t *table) clone(typ *abi.MapType) *table {
	// Shallow-copy all fields (used, capacity, growthLeft, ...).
	t2 := new(table)
	*t2 = *t
	t = t2

	// Deep-copy the group backing storage group by group.
	oldGroups := t.groups
	newGroups := newGroups(typ, oldGroups.lengthMask+1)
	for i := uint64(0); i <= oldGroups.lengthMask; i++ {
		oldGroup := oldGroups.group(typ, i)
		newGroup := newGroups.group(typ, i)
		cloneGroup(typ, newGroup, oldGroup)
	}
	t.groups = newGroups

	return t
}
1310