Source file src/internal/runtime/maps/runtime_fast64.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

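// runtime_mapaccess1_fast64 is the fast-path lookup the compiler emits for
// expressions of the form
//
//	v := m[k]
//
// on maps with 8-byte keys. It returns a pointer to the element for key, or
// a pointer to the type's zero value if the key is absent; it never returns
// nil.
//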
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

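	// No directory (dirLen == 0) means the whole map is a single group:
	// scan its slots linearly, so the hash never needs to be computed. In
	// the fast64 slot layout the element immediately follows the 8-byte
	// key, hence slotKey+8 below.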
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

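// runtime_mapaccess2_fast64 is the fast-path lookup the compiler emits for
// expressions of the form
//
//	v, ok := m[k]
//
// on maps with 8-byte keys. It returns a pointer to the element and true if
// the key is present, or a pointer to the type's zero value and false if it
// is not.
//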
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

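	// Small map (single group): same linear scan as in
	// runtime_mapaccess1_fast64.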
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint64)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint64)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 8)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

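// putSlotSmallFast64 returns a pointer to the element slot for key in a
// small (single-group) map, claiming the first empty slot for key if it is
// not already present. The caller must have set m.writing and verified that
// the group still has room.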
func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint64)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*uint64)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

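// runtime_mapassign_fast64 is the fast-path assignment the compiler emits
// for statements of the form
//
//	m[k] = v
//
// on maps with 8-byte keys. It returns a pointer to the element slot, and
// the compiled code stores v through that pointer, so the statement lowers
// to roughly
//
//	p := runtime.mapassign_fast64(typ, m, k)
//	*(*V)(p) = v
//
// where V stands for the map's element type.
//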
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFast64(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

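	// Full-size map: probe the table selected by the hash. The loop either
	// finds an existing slot for key, reuses the first tombstone seen along
	// the probe sequence, or claims the empty slot that ends the sequence.
	// If the table has no growth left even after pruning tombstones, it is
	// rehashed and the outer loop retries.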
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint64)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint64)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

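// putSlotSmallFastPtr is the pointer-key analogue of putSlotSmallFast64: it
// returns a pointer to the element slot for key in a small (single-group)
// map, claiming the first empty slot if the key is not already present.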
func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*unsafe.Pointer)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*unsafe.Pointer)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

// Key is a 64-bit pointer (only called on 64-bit GOARCH).
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast64ptr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

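	// Full-size map: probe and insert as in runtime_mapassign_fast64, but
	// comparing and storing pointer-sized keys.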
	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

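// runtime_mapdelete_fast64 is the fast-path deletion the compiler emits for
//
//	delete(m, k)
//
// on maps with 8-byte keys. Unlike the access and assign fast paths, it has
// no specialized probe loop: it delegates to the generic Map.Delete.
//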
//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
