Source file src/internal/runtime/maps/runtime_swiss.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build goexperiment.swissmap
     6  
     7  package maps
     8  
     9  import (
    10  	"internal/abi"
    11  	"internal/asan"
    12  	"internal/msan"
    13  	"internal/race"
    14  	"internal/runtime/sys"
    15  	"unsafe"
    16  )
    17  
    18  // Functions below pushed from runtime.
    19  
    20  //go:linkname mapKeyError
    21  func mapKeyError(typ *abi.SwissMapType, p unsafe.Pointer) error
    22  
    23  // Pushed from runtime in order to use runtime.plainError
    24  //
    25  //go:linkname errNilAssign
    26  var errNilAssign error
    27  
    28  // Pull from runtime. It is important that is this the exact same copy as the
    29  // runtime because runtime.mapaccess1_fat compares the returned pointer with
    30  // &runtime.zeroVal[0].
    31  // TODO: move zeroVal to internal/abi?
    32  //
    33  //go:linkname zeroVal runtime.zeroVal
    34  var zeroVal [abi.ZeroValSize]byte
    35  
    36  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
    37  // it will return a reference to the zero object for the elem type if
    38  // the key is not in the map.
    39  // NOTE: The returned pointer may keep the whole map live, so don't
    40  // hold onto it for very long.
    41  //
    42  //go:linkname runtime_mapaccess1 runtime.mapaccess1
func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	// Sanitizer instrumentation: report the read of the map and of the key
	// object before doing any real work.
	if race.Enabled && m != nil {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled && m != nil {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled && m != nil {
		asan.Read(key, typ.Key.Size_)
	}

	if m == nil || m.Used() == 0 {
		// Even on a nil or empty map, an invalid (e.g. unhashable) key
		// must still panic rather than silently return the zero value.
		if err := mapKeyError(typ, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}

	// Reads are not allowed to observe an in-progress write.
	if m.writing != 0 {
		fatal("concurrent map read and map write")
	}

	hash := typ.Hasher(key, m.seed)

	// Small map (no table directory): a single group, searched directly.
	if m.dirLen <= 0 {
		_, elem, ok := m.getWithKeySmall(typ, hash, key)
		if !ok {
			return unsafe.Pointer(&zeroVal[0])
		}
		return elem
	}

	// Select table.
	idx := m.directoryIndex(hash)
	t := m.directoryAt(idx)

	// Probe table.
	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
	for ; ; seq = seq.next() {
		g := t.groups.group(typ, seq.offset)

		// Candidate slots within the group: control bytes whose h2 matches.
		match := g.ctrls().matchH2(h2(hash))

		for match != 0 {
			i := match.first()

			slotKey := g.key(typ, i)
			// Keep the slot address: the elem offset is relative to the
			// slot, not to the (possibly indirect) key storage.
			slotKeyOrig := slotKey
			if typ.IndirectKey() {
				slotKey = *((*unsafe.Pointer)(slotKey))
			}
			if typ.Key.Equal(key, slotKey) {
				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
				if typ.IndirectElem() {
					slotElem = *((*unsafe.Pointer)(slotElem))
				}
				return slotElem
			}
			match = match.removeFirst()
		}

		match = g.ctrls().matchEmpty()
		if match != 0 {
			// Finding an empty slot means we've reached the end of
			// the probe sequence.
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
   115  
   116  //go:linkname runtime_mapaccess2 runtime.mapaccess2
   117  func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
   118  	if race.Enabled && m != nil {
   119  		callerpc := sys.GetCallerPC()
   120  		pc := abi.FuncPCABIInternal(runtime_mapaccess1)
   121  		race.ReadPC(unsafe.Pointer(m), callerpc, pc)
   122  		race.ReadObjectPC(typ.Key, key, callerpc, pc)
   123  	}
   124  	if msan.Enabled && m != nil {
   125  		msan.Read(key, typ.Key.Size_)
   126  	}
   127  	if asan.Enabled && m != nil {
   128  		asan.Read(key, typ.Key.Size_)
   129  	}
   130  
   131  	if m == nil || m.Used() == 0 {
   132  		if err := mapKeyError(typ, key); err != nil {
   133  			panic(err) // see issue 23734
   134  		}
   135  		return unsafe.Pointer(&zeroVal[0]), false
   136  	}
   137  
   138  	if m.writing != 0 {
   139  		fatal("concurrent map read and map write")
   140  	}
   141  
   142  	hash := typ.Hasher(key, m.seed)
   143  
   144  	if m.dirLen == 0 {
   145  		_, elem, ok := m.getWithKeySmall(typ, hash, key)
   146  		if !ok {
   147  			return unsafe.Pointer(&zeroVal[0]), false
   148  		}
   149  		return elem, true
   150  	}
   151  
   152  	// Select table.
   153  	idx := m.directoryIndex(hash)
   154  	t := m.directoryAt(idx)
   155  
   156  	// Probe table.
   157  	seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
   158  	for ; ; seq = seq.next() {
   159  		g := t.groups.group(typ, seq.offset)
   160  
   161  		match := g.ctrls().matchH2(h2(hash))
   162  
   163  		for match != 0 {
   164  			i := match.first()
   165  
   166  			slotKey := g.key(typ, i)
   167  			slotKeyOrig := slotKey
   168  			if typ.IndirectKey() {
   169  				slotKey = *((*unsafe.Pointer)(slotKey))
   170  			}
   171  			if typ.Key.Equal(key, slotKey) {
   172  				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
   173  				if typ.IndirectElem() {
   174  					slotElem = *((*unsafe.Pointer)(slotElem))
   175  				}
   176  				return slotElem, true
   177  			}
   178  			match = match.removeFirst()
   179  		}
   180  
   181  		match = g.ctrls().matchEmpty()
   182  		if match != 0 {
   183  			// Finding an empty slot means we've reached the end of
   184  			// the probe sequence.
   185  			return unsafe.Pointer(&zeroVal[0]), false
   186  		}
   187  	}
   188  }
   189  
   190  //go:linkname runtime_mapassign runtime.mapassign
// runtime_mapassign returns a pointer to the elem slot for key, inserting the
// key if it is not already present. It panics on a nil map (assignment to a
// nil map) and fatals on concurrent writes.
func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	if m == nil {
		panic(errNilAssign)
	}
	// Sanitizer instrumentation: the map is written, the key object is read.
	if race.Enabled {
		callerpc := sys.GetCallerPC()
		pc := abi.FuncPCABIInternal(runtime_mapassign)
		race.WritePC(unsafe.Pointer(m), callerpc, pc)
		race.ReadObjectPC(typ.Key, key, callerpc, pc)
	}
	if msan.Enabled {
		msan.Read(key, typ.Key.Size_)
	}
	if asan.Enabled {
		asan.Read(key, typ.Key.Size_)
	}
	if m.writing != 0 {
		fatal("concurrent map writes")
	}

	hash := typ.Hasher(key, m.seed)

	// Set writing after calling Hasher, since Hasher may panic, in which
	// case we have not actually done a write.
	m.writing ^= 1 // toggle, see comment on writing

	// Lazily allocate the backing store for a brand-new map.
	if m.dirPtr == nil {
		m.growToSmall(typ)
	}

	// Small map (no table directory): insert directly into the single group.
	if m.dirLen == 0 {
		if m.used < abi.SwissMapGroupSlots {
			elem := m.putSlotSmall(typ, hash, key)

			// Re-check and clear the writing flag; a concurrent writer
			// would have toggled it underneath us.
			if m.writing == 0 {
				fatal("concurrent map writes")
			}
			m.writing ^= 1

			return elem
		}

		// Can't fit another entry, grow to full size map.
		m.growToTable(typ)
	}

	var slotElem unsafe.Pointer
outer:
	for {
		// Select table. Re-evaluated each iteration because rehash (below)
		// may have changed the directory.
		idx := m.directoryIndex(hash)
		t := m.directoryAt(idx)

		seq := makeProbeSeq(h1(hash), t.groups.lengthMask)

		// As we look for a match, keep track of the first deleted slot
		// we find, which we'll use to insert the new entry if
		// necessary.
		var firstDeletedGroup groupReference
		var firstDeletedSlot uintptr

		for ; ; seq = seq.next() {
			g := t.groups.group(typ, seq.offset)
			match := g.ctrls().matchH2(h2(hash))

			// Look for an existing slot containing this key.
			for match != 0 {
				i := match.first()

				slotKey := g.key(typ, i)
				// Keep the slot address: the elem offset is relative to
				// the slot, not to the (possibly indirect) key storage.
				slotKeyOrig := slotKey
				if typ.IndirectKey() {
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				if typ.Key.Equal(key, slotKey) {
					// Key already present: optionally refresh the stored
					// key, then hand back the existing elem slot.
					if typ.NeedKeyUpdate() {
						typedmemmove(typ.Key, slotKey, key)
					}

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						slotElem = *((*unsafe.Pointer)(slotElem))
					}

					t.checkInvariants(typ, m)
					break outer
				}
				match = match.removeFirst()
			}

			// No existing slot for this key in this group. Is this the end
			// of the probe sequence?
			match = g.ctrls().matchEmpty()
			if match != 0 {
				// Finding an empty slot means we've reached the end of
				// the probe sequence.

				var i uintptr

				// If we found a deleted slot along the way, we
				// can replace it without consuming growthLeft.
				if firstDeletedGroup.data != nil {
					g = firstDeletedGroup
					i = firstDeletedSlot
					t.growthLeft++ // will be decremented below to become a no-op.
				} else {
					// Otherwise, use the empty slot.
					i = match.first()
				}

				// If there is room left to grow, just insert the new entry.
				if t.growthLeft > 0 {
					slotKey := g.key(typ, i)
					slotKeyOrig := slotKey
					if typ.IndirectKey() {
						// Indirect key: allocate the real key storage and
						// store its pointer in the slot.
						kmem := newobject(typ.Key)
						*(*unsafe.Pointer)(slotKey) = kmem
						slotKey = kmem
					}
					typedmemmove(typ.Key, slotKey, key)

					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
					if typ.IndirectElem() {
						emem := newobject(typ.Elem)
						*(*unsafe.Pointer)(slotElem) = emem
						slotElem = emem
					}

					// Publish the slot (control byte) last, then update
					// the counters.
					g.ctrls().set(i, ctrl(h2(hash)))
					t.growthLeft--
					t.used++
					m.used++

					t.checkInvariants(typ, m)
					break outer
				}

				// No growth budget: rehash this table and retry the probe
				// from the (possibly new) directory entry.
				t.rehash(typ, m)
				continue outer
			}

			// No empty slots in this group. Check for a deleted
			// slot, which we'll use if we don't find a match later
			// in the probe sequence.
			//
			// We only need to remember a single deleted slot.
			if firstDeletedGroup.data == nil {
				// Since we already checked for empty slots
				// above, matches here must be deleted slots.
				match = g.ctrls().matchEmptyOrDeleted()
				if match != 0 {
					firstDeletedGroup = g
					firstDeletedSlot = match.first()
				}
			}
		}
	}

	// Re-check and clear the writing flag; a concurrent writer would have
	// toggled it underneath us.
	if m.writing == 0 {
		fatal("concurrent map writes")
	}
	m.writing ^= 1

	return slotElem
}
   356  

View as plain text