Source file src/runtime/iface.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itab table
	itabTable     = &itabTableInit                    // pointer to current table
	itabTableInit = itabTableType{size: itabInitSize} // starter table
)

// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large
}

func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
	// compiler has provided some good hash codes for us.
	return uintptr(inter.Type.Hash ^ typ.Hash)
}

// getitab looks up (and, on first use, creates and caches) the itab for the
// given interface/concrete type pair. If typ does not implement inter,
// getitab returns nil when canfail is true and panics otherwise.
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	if typ.TFlag&abi.TFlagUncommon == 0 {
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that updates the itabTable field (with atomic.Storep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found. Grab the lock and try again.
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.Inter = inter
	m.Type = typ
	// The hash is used in type switches. However, compiler statically generates itab's
	// for all interface/type pairs used in switches (which are added to itabTable
	// in itabsinit). The dynamically-generated itab's never participate in type switches,
	// and thus the hash is irrelevant.
	// Note: m.Hash is _not_ the hash used for the runtime itabTable hash table.
	m.Hash = 0
	itabInit(m, true)
	itabAdd(m)
	unlock(&itabLock)
finish:
	if m.Fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// this can only happen if the conversion
	// was already done once using the , ok form
	// and we have a cached negative result.
	// The cached result doesn't record which
	// interface function was missing, so initialize
	// the itab again to get the missing function name.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)})
}
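
// The fast path above is a double-checked locking pattern: readers probe the
// published table after an atomic load, without holding itabLock, and only
// take the lock to re-check and insert on a miss. A minimal stand-alone
// sketch of the same pattern using the sync and sync/atomic packages (the
// itabCache name and map payload are hypothetical, purely illustrative):
//
//	type itabCache struct {
//		mu  sync.Mutex
//		cur atomic.Pointer[map[string]int] // read-only snapshots
//	}
//
//	func newItabCache() *itabCache {
//		c := new(itabCache)
//		m := map[string]int{}
//		c.cur.Store(&m)
//		return c
//	}
//
//	func (c *itabCache) get(key string, mk func() int) int {
//		if v, ok := (*c.cur.Load())[key]; ok {
//			return v // lock-free fast path, like find below
//		}
//		c.mu.Lock()
//		defer c.mu.Unlock()
//		old := *c.cur.Load()
//		if v, ok := old[key]; ok {
//			return v // lost a race; another goroutine inserted it
//		}
//		next := make(map[string]int, len(old)+1)
//		for k, v := range old {
//			next[k] = v
//		}
//		v := mk()
//		next[key] = v
//		c.cur.Store(&next) // atomic publish, like atomic.Storep in itabAdd
//		return v
//	}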

// find finds the given interface/type pair in t.
// Returns nil if the given interface/type pair isn't present.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Implemented using quadratic probing.
	// Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
	// We're guaranteed to hit all table entries using this probe sequence.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use atomic read here so if we see m != nil, we also see
		// the initializations of the fields of m.
		// m := *p
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			return nil
		}
		if m.Inter == inter && m.Type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
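
// The incremental step h += i above walks the triangular-number probe
// sequence h0, h0+1, h0+3, h0+6, ... and, modulo a power of two, that
// sequence visits every slot exactly once in the first 2^k probes, which is
// why the loop needs no explicit bound. A small stand-alone check of this
// property (illustrative only; probeCoversTable is not part of the runtime):
//
//	func probeCoversTable(k uint) bool {
//		size := uintptr(1) << k
//		mask := size - 1
//		seen := make(map[uintptr]bool, size)
//		h := uintptr(0) // any h0 works; only the offsets matter
//		for i := uintptr(1); i <= size; i++ {
//			seen[h] = true
//			h = (h + i) & mask
//		}
//		return uintptr(len(seen)) == size
//	}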

// itabAdd adds the given itab to the itab hash table.
// itabLock must be held.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing is set,
	// typically because this is called while panicking.
	// Crash reliably, rather than only when we need to grow
	// the hash table.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table.
		// t2 = new(itabTableType) + some additional entries
		// We lie and tell malloc we want pointer-free memory because
		// all the pointed-to values are not in the heap.
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries.
		// Note: while copying, other threads may look for an itab and
		// fail to find it. That's ok, they will then try to get the itab lock
		// and as a consequence wait until this copying is complete.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Use an atomic write: see comment in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}
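
// Growth doubles the table at a 75% load factor and re-inserts every live
// entry into the new table via iterate_itabs(t2.add); the (2+2*t.size)
// words in the mallocgc call are the two header fields (size, count) plus
// 2*t.size entry slots. The same grow-and-rehash step for a toy
// open-addressed table of nonzero words (a hedged sketch; grow and its
// trivial hash are hypothetical, not runtime code):
//
//	func grow(entries []uintptr, count int) []uintptr {
//		if count < 3*(len(entries)/4) {
//			return entries // still under the 75% load factor
//		}
//		grown := make([]uintptr, 2*len(entries))
//		mask := uintptr(len(grown) - 1)
//		for _, e := range entries {
//			if e == 0 {
//				continue
//			}
//			h := e & mask // stand-in for itabHashFunc
//			for i := uintptr(1); grown[h] != 0; i++ {
//				h = (h + i) & mask // same probe sequence as find
//			}
//			grown[h] = e
//		}
//		return grown
//	}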

// add adds the given itab to itab table t.
// itabLock must be held.
func (t *itabTableType) add(m *itab) {
	// See comment in find about the probe sequence.
	// Insert new itab in the first empty spot in the probe sequence.
	mask := t.size - 1
	h := itabHashFunc(m.Inter, m.Type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// A given itab may be used in more than one module
			// and thanks to the way global symbol resolution works, the
			// pointed-to itab may already have been inserted into the
			// global 'hash'.
			return
		}
		if m2 == nil {
			// Use atomic write here so if a reader sees m, it also
			// sees the correctly initialized fields of m.
			// NoWB is ok because m is not in heap memory.
			// *p = m
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}

// itabInit fills in the m.Fun array with all the code pointers for
// the m.Inter/m.Type pair. If the type does not implement the interface,
// it sets m.Fun[0] to 0 and returns the name of an interface function that is missing.
// If !firstTime, itabInit will not write anything to m.Fun (see issue 65962).
// It is ok to call this multiple times on the same m, even concurrently
// (although it will only be called once with firstTime==true).
func itabInit(m *itab, firstTime bool) string {
	inter := m.Inter
	typ := m.Type
	x := typ.Uncommon()

	// Both inter and typ have methods sorted by name,
	// and interface names are unique,
	// so we can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
	xmhdr := (*[1 << 16]abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff)))[:nt:nt]
	j := 0
	methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.Fun[0]))[:ni:ni]
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						fun0 = ifn // we'll set m.Fun[0] at the end
					} else if firstTime {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// didn't find method
		// Leaves m.Fun[0] set to 0.
		return iname
	}
	if firstTime {
		m.Fun[0] = uintptr(fun0)
	}
	return ""
}
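
// The method-matching loop above is a lock-step merge: both method lists
// are sorted by name and the index j never rewinds, so the whole scan is
// O(ni+nt) rather than O(ni*nt). The same shape in a self-contained sketch
// over plain sorted string slices (illustrative only; missing is not a
// runtime function):
//
//	// missing reports the first name in want that does not appear in have.
//	// Both slices must be sorted, and want must not contain duplicates.
//	func missing(want, have []string) (string, bool) {
//		j := 0
//		for _, w := range want {
//			for j < len(have) && have[j] < w {
//				j++
//			}
//			if j == len(have) || have[j] != w {
//				return w, true // analogous to returning iname above
//			}
//		}
//		return "", false
//	}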

// itabsinit adds the compiler-generated itabs of all active modules to the
// runtime itab table. It runs during startup.
func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		for _, i := range md.itablinks {
			itabAdd(i)
		}
	}
	unlock(&itabLock)
}

// panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
func panicdottypeE(have, want, iface *_type) {
	panic(&TypeAssertionError{iface, have, want, ""})
}

// panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
// Same args as panicdottypeE, but "have" is the dynamic itab we have.
func panicdottypeI(have *itab, want, iface *_type) {
	var t *_type
	if have != nil {
		t = have.Type
	}
	panicdottypeE(t, want, iface)
}

// panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
// want = the static type we're trying to convert to.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: Add the static type we're converting from as well.
	// It might generate a better error message.
	// Just to match other nil conversion errors, we don't for now.
}

// The specialized convTx routines need a type descriptor to use when calling mallocgc.
// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness.
// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from,
// so we use named types here.
// We then construct interface values of these types,
// and then extract the type word to use as needed.
type (
	uint16InterfacePtr uint16
	uint32InterfacePtr uint32
	uint64InterfacePtr uint64
	stringInterfacePtr string
	sliceInterfacePtr  []byte
)

var (
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)
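
// The vars above build interface values and pull their type words out with
// efaceOf. The same trick, viewed from outside the runtime, relies on the
// eface layout being two pointer-sized words (type, then data); the sketch
// below is an illustrative, unsupported peek at that layout (efaceWords and
// typeWord are hypothetical names):
//
//	type efaceWords struct {
//		typ  unsafe.Pointer
//		data unsafe.Pointer
//	}
//
//	func typeWord(x any) unsafe.Pointer {
//		return (*efaceWords)(unsafe.Pointer(&x)).typ
//	}
//
// Two values with the same dynamic type then share a type word, so
// typeWord(uint32(1)) == typeWord(uint32(2)).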

// The conv and assert functions below do very similar things.
// The convXXX functions are guaranteed by the compiler to succeed.
// The assertXXX functions may fail (either panicking or returning false,
// depending on whether they are 1-result or 2-result).
// The convXXX functions succeed on a nil input, whereas the assertXXX
// functions fail on a nil input.

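// In user code the split looks like this (ordinary Go, shown only to
// illustrate which runtime path the compiler selects):
//
//	var p *int       // nil pointer, but carries a dynamic type when boxed
//	var e any = p    // conv path: always succeeds, e != nil
//	var i any        // truly nil interface value
//	_, ok := i.(int) // 2-result assert path: ok == false
//	// _ = i.(int)   // 1-result assert path: would panic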

// convT converts a value of type t, which is pointed to by v, to a pointer that can
// be used as the second word of an interface value.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, true)
	typedmemmove(t, x, v)
	return x
}

// convTnoptr is like convT, for types that contain no pointers.
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// TODO: maybe take size instead of type?
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}

	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}

// convT16 boxes val, reusing the staticuint64s table (at the end of this
// file) for small values so that they never allocate.
func convT16(val uint16) (x unsafe.Pointer) {
	if val < uint16(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			x = add(x, 6)
		}
	} else {
		x = mallocgc(2, uint16Type, false)
		*(*uint16)(x) = val
	}
	return
}
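
// staticuint64s holds 8-byte words, so a pointer into it must point at the
// bytes that actually hold the small value: offset 0 on little-endian
// machines, but the trailing bytes on big-endian ones, hence add(x, 6) for
// a 2-byte value here and add(x, 4) for a 4-byte value in convT32. The
// offset rule as a hedged stand-alone sketch (smallIntOffset is
// hypothetical, not runtime code):
//
//	// smallIntOffset is the byte offset of an n-byte integer's value
//	// within an 8-byte word on the current architecture.
//	func smallIntOffset(n uintptr) uintptr {
//		if goarch.BigEndian {
//			return 8 - n // value lives in the trailing, low-order bytes
//		}
//		return 0 // little-endian: low-order bytes come first
//	}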

func convT32(val uint32) (x unsafe.Pointer) {
	if val < uint32(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			x = add(x, 4)
		}
	} else {
		x = mallocgc(4, uint32Type, false)
		*(*uint32)(x) = val
	}
	return
}

func convT64(val uint64) (x unsafe.Pointer) {
	if val < uint64(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
	} else {
		x = mallocgc(8, uint64Type, false)
		*(*uint64)(x) = val
	}
	return
}

func convTstring(val string) (x unsafe.Pointer) {
	if val == "" {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), stringType, true)
		*(*string)(x) = val
	}
	return
}

func convTslice(val []byte) (x unsafe.Pointer) {
	// Note: this must work for any element type, not just byte.
	if (*slice)(unsafe.Pointer(&val)).array == nil {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
		*(*[]byte)(x) = val
	}
	return
}

func assertE2I(inter *interfacetype, t *_type) *itab {
	if t == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
	}
	return getitab(inter, t, false)
}

func assertE2I2(inter *interfacetype, t *_type) *itab {
	if t == nil {
		return nil
	}
	return getitab(inter, t, true)
}

// typeAssert builds an itab for the concrete type t and the
// interface type s.Inter. If the conversion is not possible it
// panics if s.CanFail is false and returns nil if s.CanFail is true.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		return tab
	}
	// Load the current cache.
	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As the cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache.
		return tab
	}

	// Make a new cache.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}
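
// The two cheaprand checks above form a throttled, size-scaled update
// policy: a rebuild is considered roughly once per 1024 calls, and then
// only with probability 1/(Mask+1), so the linear cost of building a cache
// is amortized over more calls as the cache grows. The same predicate as a
// stand-alone sketch, with math/rand/v2 standing in for the runtime's
// cheaprand (shouldRebuild is hypothetical):
//
//	func shouldRebuild(mask uint32) bool {
//		if rand.Uint32()&1023 != 0 {
//			return false // skip ~1023 of every 1024 calls
//		}
//		return rand.Uint32()&mask == 0 // rarer as the cache grows
//	}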

// buildTypeAssertCache constructs a type assert cache containing all the
// entries from oldC plus the new entry (typ, tab).
func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
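
// Both cache builders size their tables with 1 << sys.Len64(uint64(n-1)),
// which rounds n up to the next power of two (Len64 is the number of bits
// needed to represent its argument). The same computation with the standard
// library, plus a few worked values (roundUpPow2 is illustrative only):
//
//	func roundUpPow2(n int) int {
//		return 1 << bits.Len64(uint64(n-1)) // import "math/bits"
//	}
//
//	// roundUpPow2(3) == 4, roundUpPow2(4) == 4, roundUpPow2(5) == 8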

// Empty type assert cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.)
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}

// interfaceSwitch compares t against the list of cases in s.
// If t matches case i, interfaceSwitch returns the case index i and
// an itab for the pair <t, s.Cases[i]>.
// If there is no match, it returns N, nil, where N is the number
// of cases.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if we don't find a match.
	case_ := len(cases)
	var tab *itab

	// Look through each case in order.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return case_, tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		// This ensures we don't waste memory on switches, or
		// switch arguments, that only happen a few times.
		return case_, tab
	}
	// Load the current cache.
	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As the cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache
		// (that cost is linear in oldC.Mask).
		return case_, tab
	}

	// Make a new cache.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}

// buildInterfaceSwitchCache constructs an interface switch cache
// containing all the entries from oldC plus the new entry
// (typ, case_, tab).
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}

// Empty interface switch cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately.)
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}
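
// The lookup side of these caches lives in compiler-generated code, not in
// this file, but the data layout above implies its shape: probe linearly
// from the type's hash, stop on a match or on the always-present empty
// slot. A hedged reconstruction of that loop (illustrative only, not the
// actual generated code; cacheLookup is a hypothetical name):
//
//	func cacheLookup(c *abi.InterfaceSwitchCache, typ uintptr, hash uint32) (caseIdx int, itab uintptr, ok bool) {
//		entries := unsafe.Slice(&c.Entries[0], c.Mask+1)
//		h := uintptr(hash) & c.Mask
//		for {
//			e := &entries[h]
//			if e.Typ == typ {
//				return e.Case, e.Itab, true
//			}
//			if e.Typ == 0 {
//				return 0, 0, false // empty slot: miss, call into the runtime
//			}
//			h = (h + 1) & c.Mask
//		}
//	}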

//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

func iterate_itabs(fn func(*itab)) {
	// Note: only runs during stop the world or with itabLock held,
	// so no other locks/atomics needed.
	t := itabTable
	for i := uintptr(0); i < t.size; i++ {
		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		if m != nil {
			fn(m)
		}
	}
}

// staticuint64s is used to avoid allocating in convTx for small integer values.
var staticuint64s = [...]uint64{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}

// The linker redirects a reference to a method that it has determined
// unreachable to a reference to this function, so it will throw if
// ever called.
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}
