Source file src/internal/runtime/maps/runtime.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/asan"
	"internal/msan"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

// Functions below pushed from runtime.

//go:linkname fatal
func fatal(s string)

//go:linkname rand
func rand() uint64

//go:linkname typedmemmove
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer)

//go:linkname typedmemclr
func typedmemclr(typ *abi.Type, ptr unsafe.Pointer)

//go:linkname newarray
func newarray(typ *abi.Type, n int) unsafe.Pointer

//go:linkname newobject
func newobject(typ *abi.Type) unsafe.Pointer

// Pushed from runtime in order to use runtime.plainError
//
//go:linkname errNilAssign
var errNilAssign error

// TODO: move zeroVal to internal/abi?
//
//go:linkname zeroVal runtime.zeroVal
var zeroVal [abi.ZeroValSize]byte

// mapaccess1 returns a pointer to m[key]. It never returns nil; instead,
// it returns a reference to the zero object for the elem type if the
// key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
//
//go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	p, _ := runtime_mapaccess2(typ, m, key)
	return p
}

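// mapaccess2 returns a pointer to m[key] and a bool reporting whether the
// key was present. The returned pointer is never nil: on a miss it points
// at the shared zero object (zeroVal above) for the elem type.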
//go:linkname runtime_mapaccess2 runtime.mapaccess2
func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	if m == nil || m.Used() == 0 {
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

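	// Small maps have no directory (dirLen == 0): they are a single
	// group that getWithKeySmall scans directly, with no table probing.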
	if m.dirLen == 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
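	// h1 (the high bits of the hash) seeds the probe sequence over the
	// table's groups; h2 (the low 7 bits) is what the per-slot control
	// bytes store, so matchH2 can test every slot in a group at once.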
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			slotKeyOrig := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

//go:linkname runtime_mapassign runtime.mapassign
func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled {
		asan.Read(key, typ.Key.Size_)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
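	// The flag is toggled rather than simply set so that if two writers
	// race, at least one is likely to observe writing == 0 at the check
	// below and report it via fatal.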
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

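	// Full-size map: pick a table from the directory by hash and probe
	// within it. If that table must rehash (grow or split), the key may
	// move to a different table, so the outer loop restarts from table
	// selection.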
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				slotKeyOrig := slotKey
				if typ.IndirectKey() {
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				if typ.Key.Equal(key, slotKey) {
					if typ.NeedKeyUpdate() {
						typedmemmove(typ.Key, slotKey, key)
					}

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						slotElem = *((*unsafe.Pointer)(slotElem))
					}

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmpty()
			if match != 0 {
				// Finding an empty slot means we've reached the end of
				// the probe sequence.

				var i uintptr

				// If we found a deleted slot along the way, we
				// can replace it without consuming growthLeft.
				if firstDeletedGroup.data != nil {
					g = firstDeletedGroup
					i = firstDeletedSlot
					t.growthLeft++ // will be decremented below to become a no-op.
				} else {
					// Otherwise, use the empty slot.
					i = match.first()
				}

				// If there is room left to grow, just insert the new entry.
				if t.growthLeft > 0 {
					slotKey := g.key(typ, i)
					slotKeyOrig := slotKey
					if typ.IndirectKey() {
						kmem := newobject(typ.Key)
						*(*unsafe.Pointer)(slotKey) = kmem
						slotKey = kmem
					}
					typedmemmove(typ.Key, slotKey, key)

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						emem := newobject(typ.Elem)
						*(*unsafe.Pointer)(slotElem) = emem
						slotElem = emem
					}

					g.ctrls().set(i, ctrl(h2Hash))
					t.growthLeft--
					t.used++
					m.used++

					t.checkInvariants(typ, m)
					break outer
				}

				t.rehash(typ, m)
				continue outer
			}

			// No empty slots in this group. Check for a deleted
			// slot, which we'll use if we don't find a match later
			// in the probe sequence.
			//
			// We only need to remember a single deleted slot.
			if firstDeletedGroup.data == nil {
				// Since we already checked for empty slots
				// above, matches here must be deleted slots.
				match = g.ctrls().matchEmptyOrDeleted()
				if match != 0 {
					firstDeletedGroup = g
					firstDeletedSlot = match.first()
				}
			}
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
