Source file src/internal/runtime/maps/map.go
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package maps implements Go's builtin map type.
package maps

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/math"
	"internal/runtime/sys"
	"unsafe"
)

// This package contains the implementation of Go's builtin map type.
//
// The map design is based on Abseil's "Swiss Table" map design
// (https://abseil.io/about/design/swisstables), with additional modifications
// to cover Go's additional requirements, discussed below.
//
// Terminology:
//   - Slot: A storage location of a single key/element pair.
//   - Group: A group of abi.SwissMapGroupSlots (8) slots, plus a control word.
//   - Control word: An 8-byte word which denotes whether each slot is empty,
//     deleted, or used. If a slot is used, its control byte also contains the
//     lower 7 bits of the hash (H2).
//   - H1: Upper 57 bits of a hash.
//   - H2: Lower 7 bits of a hash.
//   - Table: A complete "Swiss Table" hash table. A table consists of one or
//     more groups for storage plus metadata to handle operation and determine
//     when to grow.
//   - Map: The top-level Map type consists of zero or more tables for storage.
//     The upper bits of the hash select which table a key belongs to.
//   - Directory: Array of the tables used by the map.
//
// At its core, the table design is similar to a traditional open-addressed
// hash table. Storage consists of an array of groups, which effectively means
// an array of key/elem slots with some control words interspersed. Lookup uses
// the hash to determine an initial group to check. If, due to collisions, this
// group contains no match, the probe sequence selects the next group to check
// (see below for more detail about the probe sequence).
//
// The key difference occurs within a group. In a standard open-addressed
// linear probed hash table, we would check each slot one at a time to find a
// match. A swiss table utilizes the extra control word to check all 8 slots in
// parallel.
//
// Each byte in the control word corresponds to one of the slots in the group.
// In each byte, 1 bit is used to indicate whether the slot is in use, or if it
// is empty/deleted. The other 7 bits contain the lower 7 bits of the hash for
// the key in that slot. See [ctrl] for the exact encoding.
//
// During lookup, we can use some clever bitwise manipulation to compare all 8
// 7-bit hashes against the input hash in parallel (see [ctrlGroup.matchH2]).
// That is, we effectively perform 8 steps of probing in a single operation.
// With SIMD instructions, this could be extended to 16 slots with a 16-byte
// control word.
//
// Since we only use 7 bits of the 64 bit hash, there is a 1 in 128 (~0.7%)
// probability of false positive on each slot, but that's fine: we always need
// to double check each match with a standard key comparison regardless.
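//
// As an illustrative sketch (not the exact code; see [ctrlGroup.matchH2] for
// the real routine), the parallel comparison can be done with standard SWAR
// bit tricks on the 8-byte control word:
//
//	const lsb = 0x0101010101010101 // low bit of every byte lane
//	const msb = 0x8080808080808080 // high bit of every byte lane
//	v := ctrls ^ (lsb * uint64(h2)) // bytes equal to h2 become zero
//	match := ((v - lsb) &^ v) & msb // high bit set for each zero byte
//
// Each set high bit in match marks a candidate slot. This simple formulation
// can over-report in rare cases, but the key comparison described above weeds
// out any false positives anyway.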
//
// Probing
//
// Probing is done using the upper 57 bits (H1) of the hash as an index into
// the groups array. Probing walks through the groups using quadratic probing
// until it finds a group with a match or a group with an empty slot. See
// [probeSeq] for specifics about the probe sequence. Note the probe
// invariants: the number of groups must be a power of two, and the end of a
// probe sequence must be a group with an empty slot (the table can never be
// 100% full).
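//
// As a rough sketch (the exact sequence lives in [probeSeq]), probing uses a
// triangular-number stride, which visits every group exactly once when the
// group count is a power of two:
//
//	mask := uint64(numGroups - 1)     // numGroups is a power of two
//	offset := uint64(h1(hash)) & mask // starting group
//	for stride := uint64(1); ; stride++ {
//		// ... check the group at offset ...
//		offset = (offset + stride) & mask
//	}
//
// numGroups here is a stand-in for the table's group count; the loop
// terminates because a probe sequence always ends at a group with an empty
// slot.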
//
// Deletion
//
// Probing stops when it finds a group with an empty slot. This affects
// deletion: when deleting from a completely full group, we must not mark the
// slot as empty, as there could be more slots used later in a probe sequence
// and this deletion would cause probing to stop too early. Instead, we mark
// such slots as "deleted" with a tombstone. If the group still has an empty
// slot, we don't need a tombstone and directly mark the slot empty. Insert
// prioritizes reuse of tombstones over filling an empty slot. Otherwise,
// tombstones are only completely cleared during grow, as an in-place cleanup
// complicates iteration.
//
// Growth
//
// The probe sequence depends on the number of groups. Thus, when growing the
// group count all slots must be reordered to match the new probe sequence. In
// other words, an entire table must be grown at once.
//
// In order to support incremental growth, the map splits its contents across
// multiple tables. Each table is still a full hash table, but an individual
// table may only service a subset of the hash space. Growth occurs on
// individual tables, so while an entire table must grow at once, each of these
// grows is only a small portion of a map. The maximum size of a single grow is
// limited by limiting the maximum size of a table before it is split into
// multiple tables.
//
// A map starts with a single table. Up to [maxTableCapacity], growth simply
// replaces this table with a replacement with double capacity. Beyond this
// limit, growth splits the table into two.
//
// The map uses "extendible hashing" to select which table to use. In
// extendible hashing, we use the upper bits of the hash as an index into an
// array of tables (called the "directory"). The number of bits used increases
// as the number of tables increases. For example, when there is only 1 table,
// we use 0 bits (no selection necessary). When there are 2 tables, we use 1
// bit to select either the 0th or 1st table. [Map.globalDepth] is the number
// of bits currently used for table selection, and by extension (1 <<
// globalDepth), the size of the directory.
//
// Note that each table has its own load factor and grows independently. If the
// 1st table grows, it will split. We'll need 2 bits to select tables, though
// we'll have 3 tables total rather than 4. We support this by allowing
// multiple indices to point to the same table. This example:
//
//	directory (globalDepth=2)
//	+----+
//	| 00 | --\
//	+----+    +--> table (localDepth=1)
//	| 01 | --/
//	+----+
//	| 10 | ------> table (localDepth=2)
//	+----+
//	| 11 | ------> table (localDepth=2)
//	+----+
//
// Tables track the depth they were created at (localDepth). It is necessary to
// grow the directory when splitting a table where globalDepth == localDepth.
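//
// Directory selection itself is just a shift; as a sketch of what
// [Map.directoryIndex] computes on a 64-bit system (hypothetical values):
//
//	globalDepth := uint8(2)           // directory has 1<<2 = 4 entries
//	globalShift := 64 - globalDepth   // see depthToShift
//	idx := hash >> (globalShift & 63) // top globalDepth bits of the hash
//
// In the diagram above, a hash whose top two bits are 01 yields idx = 01,
// which shares a table with entry 00 (the localDepth=1 table).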
//
// Iteration
//
// Iteration is the most complex part of the map due to Go's generous iteration
// semantics. A summary of semantics from the spec:
//  1. Adding and/or deleting entries during iteration MUST NOT cause iteration
//     to return the same entry more than once.
//  2. Entries added during iteration MAY be returned by iteration.
//  3. Entries modified during iteration MUST return their latest value.
//  4. Entries deleted during iteration MUST NOT be returned by iteration.
//  5. Iteration order is unspecified. In the implementation, it is explicitly
//     randomized.
//
// If the map never grows, these semantics are straightforward: just iterate
// over every table in the directory and every group and slot in each table.
// These semantics all land as expected.
//
// If the map grows during iteration, things complicate significantly. First
// and foremost, we need to track which entries we already returned to satisfy
// (1). There are three types of grow:
//  a. A table replaced by a single larger table.
//  b. A table split into two replacement tables.
//  c. Growing the directory (occurs as part of (b) if necessary).
//
// For all of these cases, the replacement table(s) will have a different probe
// sequence, so simply tracking the current group and slot indices is not
// sufficient.
//
// For (a) and (b), note that grows of tables other than the one we are
// currently iterating over are irrelevant.
//
// We handle (a) and (b) by having the iterator keep a reference to the table
// it is currently iterating over, even after the table is replaced. We keep
// iterating over the original table to maintain the iteration order and avoid
// violating (1). Any new entries added only to the replacement table(s) will
// be skipped (allowed by (2)). To avoid violating (3) or (4), while we use the
// original table to select the keys, we must look them up again in the new
// table(s) to determine if they have been modified or deleted. There is yet
// another layer of complexity if the key does not compare equal to itself. See
// [Iter.Next] for the gory details.
//
// Note that for (b) once we finish iterating over the old table we'll need to
// skip the next entry in the directory, as that contains the second split of
// the old table. We can use the old table's localDepth to determine the next
// logical index to use.
//
// For (c), we must adjust the current directory index when the directory
// grows. This is more straightforward, as the directory order remains the same
// after growth, so we just double the index if the directory size doubles.

// Extracts the H1 portion of a hash: the 57 upper bits.
// TODO(prattmic): what about 32-bit systems?
func h1(h uintptr) uintptr {
	return h >> 7
}

// Extracts the H2 portion of a hash: the 7 bits not used for h1.
//
// These are used as an occupied control byte.
func h2(h uintptr) uintptr {
	return h & 0x7f
}

type Map struct {
	// The number of filled slots (i.e. the number of elements in all
	// tables). Excludes deleted slots.
	used uint64

	// seed is the hash seed, computed as a unique random number per map.
	seed uintptr

	// The directory of tables.
	//
	// Normally dirPtr points to an array of table pointers
	//
	//	dirPtr *[dirLen]*table
	//
	// The length (dirLen) of this array is `1 << globalDepth`. Multiple
	// entries may point to the same table. See top-level comment for more
	// details.
	//
	// Small map optimization: if the map always contained
	// abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
	// single group. In that case dirPtr points directly to a single group.
	//
	//	dirPtr *group
	//
	// In this case, dirLen is 0. used counts the number of used slots in
	// the group. Note that small maps never have deleted slots (as there
	// is no probe sequence to maintain).
	dirPtr unsafe.Pointer
	dirLen int

	// The number of bits to use in table directory lookups.
	globalDepth uint8

	// The number of bits to shift out of the hash for directory lookups.
	// On 64-bit systems, this is 64 - globalDepth.
	globalShift uint8

	// writing is a flag that is toggled (XOR 1) while the map is being
	// written. Normally it is set to 1 when writing, but if there are
	// multiple concurrent writers, then toggling increases the probability
	// that both sides will detect the race.
	writing uint8

	// clearSeq is a sequence counter of calls to Clear. It is used to
	// detect map clears during iteration.
	clearSeq uint64
}

func depthToShift(depth uint8) uint8 {
	if goarch.PtrSize == 4 {
		return 32 - depth
	}
	return 64 - depth
}

// If m is non-nil, it should be used rather than allocating.
//
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
	if m == nil {
		m = new(Map)
	}

	m.seed = uintptr(rand())

	if hint <= abi.SwissMapGroupSlots {
		// A small map can fill all 8 slots, so no need to increase
		// target capacity.
		//
		// In fact, since an 8 slot group is what the first assignment
		// to an empty map would allocate anyway, it doesn't matter if
		// we allocate here or on the first assignment.
		//
		// Thus we just return without allocating. (We'll save the
		// allocation completely if no assignment comes.)

		// Note that the compiler may have initialized m.dirPtr with a
		// pointer to a stack-allocated group, in which case we already
		// have a group. The control word is already initialized.

		return m
	}

	// Full size map.

	// Set initial capacity to hold hint entries without growing in the
	// average case.
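	//
	// For example, if maxAvgGroupLoad were 7 (illustrative value only), a
	// hint of 100 would give targetCapacity = 100*8/7 = 114 slots, leaving
	// headroom so the average group stays below the target load.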
	targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad
	if targetCapacity < hint { // overflow
		return m // return an empty map.
	}

	dirSize := (uint64(targetCapacity) + maxTableCapacity - 1) / maxTableCapacity
	dirSize, overflow := alignUpPow2(dirSize)
	if overflow || dirSize > uint64(math.MaxUintptr) {
		return m // return an empty map.
	}

	// Reject hints that are obviously too large.
	groups, overflow := math.MulUintptr(uintptr(dirSize), maxTableCapacity)
	if overflow {
		return m // return an empty map.
	} else {
		mem, overflow := math.MulUintptr(groups, mt.GroupSize)
		if overflow || mem > maxAlloc {
			return m // return an empty map.
		}
	}

	m.globalDepth = uint8(sys.TrailingZeros64(dirSize))
	m.globalShift = depthToShift(m.globalDepth)

	directory := make([]*table, dirSize)

	for i := range directory {
		// TODO: Think more about initial table capacity.
		directory[i] = newTable(mt, uint64(targetCapacity)/dirSize, i, m.globalDepth)
	}

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	return m
}

func NewEmptyMap() *Map {
	m := new(Map)
	m.seed = uintptr(rand())
	// See comment in NewMap. No need to eagerly allocate a group.
	return m
}

func (m *Map) directoryIndex(hash uintptr) uintptr {
	if m.dirLen == 1 {
		return 0
	}
	return hash >> (m.globalShift & 63)
}

func (m *Map) directoryAt(i uintptr) *table {
	return *(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i))
}

func (m *Map) directorySet(i uintptr, nt *table) {
	*(**table)(unsafe.Pointer(uintptr(m.dirPtr) + goarch.PtrSize*i)) = nt
}

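// replaceTable installs nt in every directory entry that should point to it.
// For example (hypothetical values), a table with localDepth=1 in a directory
// with globalDepth=3 occupies 1<<(3-1) = 4 consecutive entries starting at
// nt.index.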
func (m *Map) replaceTable(nt *table) {
	// The number of entries that reference the same table doubles for each
	// time the globalDepth grows without the table splitting.
	entries := 1 << (m.globalDepth - nt.localDepth)
	for i := 0; i < entries; i++ {
		//m.directory[nt.index+i] = nt
		m.directorySet(uintptr(nt.index+i), nt)
	}
}

func (m *Map) installTableSplit(old, left, right *table) {
	if old.localDepth == m.globalDepth {
		// No room for another level in the directory. Grow the
		// directory.
		newDir := make([]*table, m.dirLen*2)
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			newDir[2*i] = t
			newDir[2*i+1] = t
			// t may already exist in multiple indices. We should
			// only update t.index once. Since the index must
			// increase, seeing the original index means this must
			// be the first time we've encountered this table.
			if t.index == i {
				t.index = 2 * i
			}
		}
		m.globalDepth++
		m.globalShift--
		//m.directory = newDir
		m.dirPtr = unsafe.Pointer(&newDir[0])
		m.dirLen = len(newDir)
	}

	// N.B. left and right may still consume multiple indices if the
	// directory has grown multiple times since old was last split.
	left.index = old.index
	m.replaceTable(left)

	entries := 1 << (m.globalDepth - left.localDepth)
	right.index = left.index + entries
	m.replaceTable(right)
}

func (m *Map) Used() uint64 {
	return m.used
}

// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	return m.getWithoutKey(typ, key)
}

func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		return m.getWithKeySmall(typ, hash, key)
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithKey(typ, hash, key)
}

func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if m.Used() == 0 {
		return nil, false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		return elem, ok
	}

	idx := m.directoryIndex(hash)
	return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}

func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
	g := groupReference{
		data: m.dirPtr,
	}

	h2 := uint8(h2(hash))
	ctrls := *g.ctrls()

	for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
		c := uint8(ctrls)
		ctrls >>= 8
		if c != h2 {
			continue
		}

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}

		if typ.Key.Equal(key, slotKey) {
			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}
			return slotKey, slotElem, true
		}
	}

	return nil, nil, false
}

func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
	slotElem := m.PutSlot(typ, key)
	typedmemmove(typ.Elem, slotElem, elem)
}

// PutSlot returns a pointer to the element slot where an inserted element
// should be written.
//
// PutSlot never returns nil.
func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		//
		// TODO(prattmic): If this is an update to an existing key then
		// we actually don't need to grow.
		m.growToTable(typ)
	}

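	// A table's PutSlot reports !ok when the put could not be completed in
	// that table (for instance, if the table was replaced while making
	// room); in that case, re-resolve the directory entry and retry.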
	for {
		idx := m.directoryIndex(hash)
		elem, ok := m.directoryAt(idx).PutSlot(typ, m, hash, key)
		if !ok {
			continue
		}

		if m.writing == 0 {
			fatal("concurrent map writes")
		}
		m.writing ^= 1

		return elem
	}
}

func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			if typ.NeedKeyUpdate() {
				typedmemmove(typ.Key, slotKey, key)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				slotElem = *((*unsafe.Pointer)(slotElem))
			}

			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
		return nil
	}

	i := match.first()

	slotKey := g.key(typ, i)
	if typ.IndirectKey() {
		kmem := newobject(typ.Key)
		*(*unsafe.Pointer)(slotKey) = kmem
		slotKey = kmem
	}
	typedmemmove(typ.Key, slotKey, key)

	slotElem := g.elem(typ, i)
	if typ.IndirectElem() {
		emem := newobject(typ.Elem)
		*(*unsafe.Pointer)(slotElem) = emem
		slotElem = emem
	}

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

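// growToSmall allocates the single group that backs a small (directory-less)
// map and marks all of its slots empty.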
func (m *Map) growToSmall(typ *abi.SwissMapType) {
	grp := newGroups(typ, 1)
	m.dirPtr = grp.data

	g := groupReference{
		data: m.dirPtr,
	}
	g.ctrls().setEmpty()
}

func (m *Map) growToTable(typ *abi.SwissMapType) {
	tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0)

	g := groupReference{
		data: m.dirPtr,
	}

	for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
		if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
			// Empty
			continue
		}

		key := g.key(typ, i)
		if typ.IndirectKey() {
			key = *((*unsafe.Pointer)(key))
		}

		elem := g.elem(typ, i)
		if typ.IndirectElem() {
			elem = *((*unsafe.Pointer)(elem))
		}

		hash := typ.Hasher(key, m.seed)

		tab.uncheckedPutSlot(typ, hash, key, elem)
	}

	directory := make([]*table, 1)

	directory[0] = tab

	m.dirPtr = unsafe.Pointer(&directory[0])
	m.dirLen = len(directory)

	m.globalDepth = 0
	m.globalShift = depthToShift(m.globalDepth)
}

func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.deleteSmall(typ, hash, key)
	} else {
		idx := m.directoryIndex(hash)
		m.directoryAt(idx).Delete(typ, m, hash, key)
	}

	if m.used == 0 {
		// Reset the hash seed to make it more difficult for attackers
		// to repeatedly trigger hash collisions. See
		// https://go.dev/issue/25237.
		m.seed = uintptr(rand())
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	for match != 0 {
		i := match.first()
		slotKey := g.key(typ, i)
		origSlotKey := slotKey
		if typ.IndirectKey() {
			slotKey = *((*unsafe.Pointer)(slotKey))
		}
		if typ.Key.Equal(key, slotKey) {
			m.used--

			if typ.IndirectKey() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(origSlotKey) = nil
			} else if typ.Key.Pointers() {
				// Only bother clearing if there are pointers.
				typedmemclr(typ.Key, slotKey)
			}

			slotElem := g.elem(typ, i)
			if typ.IndirectElem() {
				// Clearing the pointer is sufficient.
				*(*unsafe.Pointer)(slotElem) = nil
			} else {
				// Unlike keys, always clear the elem (even if
				// it contains no pointers), as compound
				// assignment operations depend on cleared
				// deleted values. See
				// https://go.dev/issue/25936.
				typedmemclr(typ.Elem, slotElem)
			}

			// We only have 1 group, so it is OK to immediately
			// reuse deleted slots.
			g.ctrls().set(i, ctrlEmpty)
			return
		}
		match = match.removeFirst()
	}
}

// Clear deletes all entries from the map resulting in an empty map.
func (m *Map) Clear(typ *abi.SwissMapType) {
	if m == nil || m.Used() == 0 {
		return
	}

	if m.writing != 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirLen == 0 {
		m.clearSmall(typ)
	} else {
		var lastTab *table
		for i := range m.dirLen {
			t := m.directoryAt(uintptr(i))
			if t == lastTab {
				continue
			}
			t.Clear(typ)
			lastTab = t
		}
		m.used = 0
		m.clearSeq++
		// TODO: shrink directory?
	}

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See https://go.dev/issue/25237.
	m.seed = uintptr(rand())

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1
}

func (m *Map) clearSmall(typ *abi.SwissMapType) {
	g := groupReference{
		data: m.dirPtr,
	}

	typedmemclr(typ.Group, g.data)
	g.ctrls().setEmpty()

	m.used = 0
	m.clearSeq++
}