// Source file src/internal/runtime/maps/runtime_faststr_swiss.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.swissmap
     6  
     7  package maps
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/goarch"
    12  	"internal/race"
    13  	"internal/runtime/sys"
    14  	"unsafe"
    15  )
    16  
// getWithoutKeySmallFastStr looks up key in a small map (a single group,
// no directory) and returns a pointer to its element, or nil if the key
// is not present.
//
// For keys longer than 64 bytes, a cheap quick-equality scan over all
// slots may rule out (or pin down) a match without hashing the key at all.
func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	ctrls := *g.ctrls()
	slotKey := g.key(typ, 0)
	slotSize := typ.SlotSize

	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight,
	// where there are 8 keys to check, all of which don't quick-match the lookup key.
	// In that case, we can save hashing the lookup key. That savings is worth this extra code
	// for strings that are long enough that hashing is expensive.
	if len(key) > 64 {
		// String hashing and equality might be expensive. Do a quick check first.
		j := abi.SwissMapGroupSlots // sentinel: no candidate slot found yet
		for i := range abi.SwissMapGroupSlots {
			// A full slot has the high bit of its control byte
			// clear; empty and deleted slots have it set, so only
			// full slots are quick-tested.
			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
				if j < abi.SwissMapGroupSlots {
					// 2 strings both passed the quick equality test.
					// Break out of this loop and do it the slow way.
					goto dohash
				}
				j = i
			}
			// Advance to the next slot; ctrls holds one control
			// byte per slot, consumed low byte first.
			slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
			ctrls >>= 8
		}
		if j == abi.SwissMapGroupSlots {
			// No slot passed the quick test.
			return nil
		}
		// There's exactly one slot that passed the quick test. Do the single expensive comparison.
		slotKey = g.key(typ, uintptr(j))
		if key == *(*string)(slotKey) {
			// The element is stored immediately after the
			// 2-pointer-word string key.
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		return nil
	}

dohash:
	// This path will cost 1 hash and 1+ε comparisons.
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
	h2 := uint8(h2(hash))
	ctrls = *g.ctrls()
	slotKey = g.key(typ, 0)

	// Linear scan of the single group: a slot is a hit only when its
	// control byte equals h2 and the keys compare equal.
	for range abi.SwissMapGroupSlots {
		if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
		}
		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
		ctrls >>= 8
	}
	return nil
}
    73  
    74  // Returns true if a and b might be equal.
    75  // Returns false if a and b are definitely not equal.
    76  // Requires len(a)>=8.
    77  func longStringQuickEqualityTest(a, b string) bool {
    78  	if len(a) != len(b) {
    79  		return false
    80  	}
    81  	x, y := stringPtr(a), stringPtr(b)
    82  	// Check first 8 bytes.
    83  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
    84  		return false
    85  	}
    86  	// Check last 8 bytes.
    87  	x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8)
    88  	y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8)
    89  	if *(*[8]byte)(x) != *(*[8]byte)(y) {
    90  		return false
    91  	}
    92  	return true
    93  }
    94  func stringPtr(s string) unsafe.Pointer {
    95  	type stringStruct struct {
    96  		ptr unsafe.Pointer
    97  		len int
    98  	}
    99  	return (*stringStruct)(unsafe.Pointer(&s)).ptr
   100  }
   101  
// runtime_mapaccess1_faststr is the string-key specialization of map
// lookup, linknamed as runtime.mapaccess1_faststr. It returns a pointer
// to the element for key, or a pointer to the shared zero value if the
// key is absent; it never returns nil.
//
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// NOTE(review): the race PC is attributed to the generic
		// runtime_mapaccess1, not this faststr variant — presumably
		// intentional; confirm it matches the other fast variants.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// A nil or empty map contains no keys; return the canonical zero value.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil
	}

	// Small map (no directory): use the single-group lookup, which may
	// skip hashing long keys entirely.
	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Hash a local copy of key; NoEscape keeps it off the heap.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots are those whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				// Element is stored right after the 2-word string key.
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
   160  
// runtime_mapaccess2_faststr is the string-key specialization of the
// two-result ("comma ok") map lookup, linknamed as
// runtime.mapaccess2_faststr. It returns a pointer to the element and
// true if key is present, or a pointer to the shared zero value and
// false otherwise.
//
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		// NOTE(review): race PC attributed to the generic
		// runtime_mapaccess1, as in the one-result variant — confirm
		// this is the intended attribution.
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
	}

	// A nil or empty map contains no keys.
	if m == nil || m.Used() == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}

	if m.writing != 0 {
		fatal("concurrent map read and map write")
		return nil, false
	}

	// Small map (no directory): single-group lookup.
	if m.dirLen <= 0 {
		elem := m.getWithoutKeySmallFastStr(typ, key)
		if elem == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
		return elem, true
	}

	// Hash a local copy of key; NoEscape keeps it off the heap.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots are those whose control byte matches h2(hash).
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			if key == *(*string)(slotKey) {
				// Element is stored right after the 2-word string key.
				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
				return slotElem, true
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
   219  
// putSlotSmallFastStr inserts or updates key in a small map (a single
// group, no directory) and returns a pointer to the element slot for the
// caller to fill in. The caller must have verified there is room
// (m.used < abi.SwissMapGroupSlots) and must have set the writing flag.
func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
	g := groupReference{
		data: m.dirPtr,
	}

	match := g.ctrls().matchH2(h2(hash))

	// Look for an existing slot containing this key.
	for match != 0 {
		i := match.first()

		slotKey := g.key(typ, i)
		if key == *(*string)(slotKey) {
			// Key needs update, as the backing storage may differ.
			*(*string)(slotKey) = key
			slotElem := g.elem(typ, i)
			return slotElem
		}
		match = match.removeFirst()
	}

	// There can't be deleted slots, small maps can't have them
	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit
	// more efficient than matchEmpty.
	match = g.ctrls().matchEmptyOrDeleted()
	if match == 0 {
		fatal("small map with no empty slot (concurrent map writes?)")
	}

	i := match.first()

	// Claim the empty slot: store the key, then publish the control byte.
	slotKey := g.key(typ, i)
	*(*string)(slotKey) = key

	slotElem := g.elem(typ, i)

	g.ctrls().set(i, ctrl(h2(hash)))
	m.used++

	return slotElem
}
   261  
// runtime_mapassign_faststr is the string-key specialization of mapassign,
// linknamed as runtime.mapassign_faststr. It returns a pointer to the
// element slot for key, inserting the key if it is not already present.
// Panics on a nil map; fatals on detected concurrent writes.
//
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	// Hash a local copy of key; NoEscape keeps it off the heap.
	k := key
	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// Lazily allocate the initial (small) backing store.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	// Small map (no directory): insert directly into the single group.
	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmallFastStr(typ, hash, key)

			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	// Retry loop: restarts after a rehash moves entries to a new table.
	for {
		// Select table.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				if key == *(*string)(slotKey) {
					// Key needs update, as the backing
					// storage may differ.
					*(*string)(slotKey) = key
					slotElem = g.elem(typ, i)

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmptyOrDeleted()
			if match == 0 {
				continue // nothing but filled slots. Keep probing.
			}
			i := match.first()
			if g.ctrls().get(i) == ctrlDeleted {
				// There are some deleted slots. Remember
				// the first one, and keep probing.
				if firstDeletedGroup.data == nil {
					firstDeletedGroup = g
					firstDeletedSlot = i
				}
				continue
			}
			// We've found an empty slot, which means we've reached the end of
			// the probe sequence.

			// If we found a deleted slot along the way, we can
			// replace it without consuming growthLeft.
			if firstDeletedGroup.data != nil {
				g = firstDeletedGroup
				i = firstDeletedSlot
				t.growthLeft++ // will be decremented below to become a no-op.
			}

			// If there is room left to grow, just insert the new entry.
			if t.growthLeft > 0 {
				slotKey := g.key(typ, i)
				*(*string)(slotKey) = key

				slotElem = g.elem(typ, i)

				g.ctrls().set(i, ctrl(h2(hash)))
				t.growthLeft--
				t.used++
				m.used++

				t.checkInvariants(typ, m)
				break outer
			}

			// No room: rehash (grow or split) this table and retry
			// the whole insert against the new layout.
			t.rehash(typ, m)
			continue outer
		}
	}

	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   394  
// runtime_mapdelete_faststr is the string-key specialization of mapdelete,
// linknamed as runtime.mapdelete_faststr. Deleting from a nil or empty
// map is a no-op; otherwise it delegates to the generic Map.Delete.
//
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		// NOTE(review): race PC attributed to runtime_mapassign (a
		// write), matching the assign path — confirm this mirrors the
		// other fast map variants.
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
	}

	if m == nil || m.Used() == 0 {
		return
	}

	// NoEscape keeps the key pointer from forcing key to the heap.
	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key)))
}
   409  

// View as plain text