Source file src/internal/runtime/maps/runtime_faststr.go

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package maps

import (
	"internal/abi"
	"internal/goarch"
	"internal/race"
	"internal/runtime/sys"
	"unsafe"
)

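// getWithoutKeySmallFastStr returns a pointer to the element for key in a
// small map consisting of a single group, or nil if key is not present.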
func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	ctrls := *g.ctrls()
	slotKey := g.key(typ, 0)
	slotSize := typ.SlotSize

	// The 64-byte threshold was chosen based on the performance of BenchmarkMapStringKeysEight,
	// where there are 8 keys to check, none of which quick-matches the lookup key.
	// In that case we can skip hashing the lookup key entirely. That savings is worth
	// this extra code for strings long enough that hashing is expensive.
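	//
	// For example (illustrative): with a 100-byte lookup key and a full group
	// of stored keys that all differ from it in length or in their first or
	// last 8 bytes, the code below does one cheap length/first-8/last-8 byte
	// check per slot and returns nil without ever hashing the lookup key.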
	if len(key) > 64 {
		// String hashing and equality might be expensive. Do a quick check first.
		j := abi.MapGroupSlots // sentinel: no slot has passed the quick test yet
		for i := range abi.MapGroupSlots {
			// The low byte of ctrls is slot i's control byte; its top bit
			// is clear only for full slots.
			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
				if j < abi.MapGroupSlots {
					// Two strings have now passed the quick equality test.
					// Break out of this loop and do it the slow way.
					goto dohash
				}
				j = i
			}
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			ctrls >>= 8
		}
		if j == abi.MapGroupSlots {
			// No slot passed the quick test.
			return nil
		}
		// There's exactly one slot that passed the quick test. Do the single expensive comparison.
		slotKey = g.key(typ, uintptr(j))
		if key == *(*string)(slotKey) {
			// The element is stored right after the two-word string key.
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		return nil
	}

dohash:
	// This path will cost 1 hash and 1+ε comparisons.
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
	h2 := uint8(h2(hash))
	ctrls = *g.ctrls()
	slotKey = g.key(typ, 0)

	for range abi.MapGroupSlots {
		// The low byte of ctrls is this slot's control byte. It equals h2
		// only for a full slot whose stored hash bits match.
		if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
		ctrls >>= 8
	}
	return nil
}

// longStringQuickEqualityTest reports whether a and b might be equal.
// A false result means a and b are definitely not equal.
// Requires len(a) >= 8.
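// For example, two strings of equal length that share their first and last
// 8 bytes but differ somewhere in the middle still pass this test; callers
// must confirm a positive result with a full comparison.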
func longStringQuickEqualityTest(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	x, y := stringPtr(a), stringPtr(b)
	// Check first 8 bytes.
	if *(*[8]byte)(x) != *(*[8]byte)(y) {
		return false
	}
	// Check last 8 bytes.
	x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8)
	y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8)
	if *(*[8]byte)(x) != *(*[8]byte)(y) {
		return false
	}
	return true
}
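
// stringPtr returns a pointer to the byte data of s.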
func stringPtr(s string) unsafe.Pointer {
	type stringStruct struct {
		ptr unsafe.Pointer
		len int
	}
	return (*stringStruct)(unsafe.Pointer(&s)).ptr
}

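// runtime_mapaccess1_faststr provides runtime.mapaccess1_faststr, which the
// compiler uses for string-keyed map reads of the form v := m[k]. It returns
// a pointer to the element if the key is present, and a pointer to the shared
// zero value otherwise; it never returns nil.
//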
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table. h1 (the upper hash bits) picks the starting group of the
	// probe sequence; h2 (the low 7 bits) is matched against the per-slot
	// control bytes within each group.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

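// runtime_mapaccess2_faststr provides runtime.mapaccess2_faststr, the
// comma-ok variant (v, ok := m[k]) of the string-key read path. It also
// reports whether the key was present.
//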
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	h2Hash := h2(hash)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		match := g.ctrls().matchH2(h2Hash)

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

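// putSlotSmallFastStr returns a pointer to the element slot for key in a map
// that is still a single group, inserting key if it is not already present.
// The caller has already checked that the group has room for another entry.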
func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots; small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}

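// runtime_mapassign_faststr provides runtime.mapassign_faststr, which the
// compiler uses for string-keyed assignments (m[k] = v). It returns a pointer
// to the element slot for key, inserting the key if necessary; compiled code
// then stores the value through that pointer.
//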
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign_faststr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	if m.dirLen == 0 {
		if m.used < abi.MapGroupSlots {
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry; grow to a full-size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		h2Hash := h2(hash)
		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2Hash)

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If we have no space left, first try to remove some tombstones.
			if t.growthLeft == 0 {
				t.pruneTombstones(typ, m)
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2Hash))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// Still no room. Rehashing may grow or split this table and
			// change the directory, so start over and re-select the table.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}

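// runtime_mapdelete_faststr provides runtime.mapdelete_faststr, which the
// compiler uses for delete(m, k) on string-keyed maps.
//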
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
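
// Usage sketch (assumption: as the linknames above suggest, the compiler
// lowers string-keyed map operations to these helpers):
//
//	m := map[string]int{"alpha": 1}
//	v := m["alpha"]     // runtime.mapaccess1_faststr
//	v, ok := m["alpha"] // runtime.mapaccess2_faststr
//	m["beta"] = 2       // runtime.mapassign_faststr
//	delete(m, "alpha")  // runtime.mapdelete_faststr
//	_, _ = v, ok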