Source file src/internal/runtime/maps/runtime_fast32.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

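// runtime_mapaccess1_fast32 is the specialized lookup path the compiler
// uses for map[uint32]V expressions of the form v := m[k]. It returns a
// pointer to the element, or a pointer to the zero value if the key is
// absent. It never returns nil.
//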
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

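	// Small maps (no directory) consist of a single group stored at
	// dirPtr; scan its slots linearly, no hash required.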
	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
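			// Loading the key is safe even if the slot is not
			// full; full.lowestSet() gates the actual match.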
			if key == *(*uint32)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0])
	}

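	// Copy the key so we can take its address; abi.NoEscape hides the
	// pointer from escape analysis, keeping k on the stack.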
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
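	// h1, the upper hash bits, seeds the probe sequence; h2, the low 7
	// bits, is the control byte matched against each group.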
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

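// runtime_mapaccess2_fast32 implements the two-result lookup form
// v, ok := m[k]. It also reports whether the key was present.
//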
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen == 0 {
		g := groupReference{
			data: m.dirPtr,
		}
		full := g.ctrls().matchFull()
		slotKey := g.key(typ, 0)
		slotSize := typ.SlotSize
		for full != 0 {
			if key == *(*uint32)(slotKey) && full.lowestSet() {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			full = full.shiftOutLowest()
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*uint32)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

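// putSlotSmallFast32 inserts key into a small (single-group) map and
// returns a pointer to its element slot. The caller must have verified
// that the map still has room (m.used < abi.MapGroupSlots).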
func (m *Map) putSlotSmallFast32(typ *abi.MapType, hash uintptr, key uint32) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*uint32)(slotKey) {
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps never have them (see
	// deleteSmall). Use matchEmptyOrDeleted since it is a bit more
	// efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*uint32)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

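// runtime_mapassign_fast32 returns a pointer to the element slot for
// key, inserting the key first if it is not already present. The
// compiler stores the assigned value through the returned pointer.
//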
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
func runtime_mapassign_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFast32(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*uint32)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots; keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*uint32)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

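			// No free slot even after pruning tombstones; rehash
			// the table and retry the probe from the top.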
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

// Key is a 32-bit pointer (only called on 32-bit GOARCH). This source is identical to fast64ptr.
//
// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
func runtime_mapassign_fast32ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_fast32ptr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFastPtr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot we
		// find, which we'll use to insert the new entry if necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*unsafe.Pointer)(slotKey) {
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots; keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*unsafe.Pointer)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

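// runtime_mapdelete_fast32 removes key from the map, delegating to the
// generic Map.Delete after the race check and empty-map fast path.
//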
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
func runtime_mapdelete_fast32(typ *abi.MapType, m *Map, key uint32) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
