// Source file src/runtime/race.go
     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build race
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"unsafe"
    12  )
    13  
// Public race detection API, present iff build with -race.

// RaceRead informs the race detector of a read of the byte at addr.
// Bodyless declaration: implemented outside Go (assembly).
func RaceRead(addr unsafe.Pointer)

// RaceWrite informs the race detector of a write of the byte at addr.
func RaceWrite(addr unsafe.Pointer)

// RaceReadRange informs the race detector of a read of the len bytes at addr.
func RaceReadRange(addr unsafe.Pointer, len int)

// RaceWriteRange informs the race detector of a write of the len bytes at addr.
func RaceWriteRange(addr unsafe.Pointer, len int)
    20  
    21  func RaceErrors() int {
    22  	var n uint64
    23  	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
    24  	return int(n)
    25  }
    26  
// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	// Thin wrapper over the runtime-internal raceacquire, which validates
	// addr and forwards to the TSan runtime.
	raceacquire(addr)
}
    40  
// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	// Forwards to the runtime-internal racerelease for the current goroutine.
	racerelease(addr)
}
    51  
// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	// Forwards to the runtime-internal racereleasemerge for the current goroutine.
	racereleasemerge(addr)
}
    62  
// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
//
//go:nosplit
func RaceDisable() {
	gp := getg()
	// Only notify TSan on the 0->1 transition; nested calls just bump
	// the per-goroutine ignore counter.
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
	}
	gp.raceignore++
}
    76  
// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
	gp := getg()
	gp.raceignore--
	// Only notify TSan when the outermost RaceDisable is undone
	// (counter returns to zero).
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
	}
}
    87  
// Private interface for the runtime.

// raceenabled is true in this file because it is compiled only under the
// race build tag (see //go:build race above); the rest of the runtime
// branches on this constant.
const raceenabled = true
    91  
    92  // For all functions accepting callerpc and pc,
    93  // callerpc is a return PC of the function that calls this function,
    94  // pc is start PC of the function that calls this function.
    95  func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
    96  	kind := t.Kind_ & kindMask
    97  	if kind == kindArray || kind == kindStruct {
    98  		// for composite objects we have to read every address
    99  		// because a write might happen to any subobject.
   100  		racereadrangepc(addr, t.Size_, callerpc, pc)
   101  	} else {
   102  		// for non-composite objects we can read just the start
   103  		// address, as any write must write the first byte.
   104  		racereadpc(addr, callerpc, pc)
   105  	}
   106  }
   107  
   108  func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
   109  	kind := t.Kind_ & kindMask
   110  	if kind == kindArray || kind == kindStruct {
   111  		// for composite objects we have to write every address
   112  		// because a write might happen to any subobject.
   113  		racewriterangepc(addr, t.Size_, callerpc, pc)
   114  	} else {
   115  		// for non-composite objects we can write just the start
   116  		// address, as any write must write the first byte.
   117  		racewritepc(addr, callerpc, pc)
   118  	}
   119  }
   120  
// racereadpc records a read of addr with an explicit caller PC (callpc)
// and accessing-function start PC (pc). Bodyless: implemented outside Go.
//
//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

// racewritepc is the write analogue of racereadpc.
//
//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)
   126  
// symbolizeCodeContext is the in/out parameter block for raceSymbolizeCode.
// It arrives from the C race runtime via racecallback, so its layout
// presumably must match the C side — do not reorder or resize fields
// (verify against the TSan runtime sources).
type symbolizeCodeContext struct {
	pc   uintptr // in: pc to symbolize; out: pc of logical caller when inlined
	fn   *byte   // out: NUL-terminated function name
	file *byte   // out: NUL-terminated file name
	line uintptr // out: line number
	off  uintptr // out: pc offset from function entry
	res  uintptr // out: set to 1 when the context was populated
}
   135  
// NUL-terminated placeholder strings handed back to the race runtime
// when symbolization fails ("??" for the function, "-" for the file).
var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

// Command codes passed from the C race runtime to racecallback.
const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)
   144  
// Callback from C into Go, runs on g0.
// Dispatches on the command code; ctx points at a command-specific
// parameter block supplied by the C side.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		// racecallbackthunk services this command itself; reaching
		// here indicates a bug.
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}
   158  
// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	if fi.valid() {
		// Walk the inline tree at pc, innermost frame first.
		u, uf := newInlineUnwinder(fi, pc)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
				// Ignore wrappers, unless we're at the outermost frame of u.
				// A non-inlined wrapper frame always means we have a physical
				// frame consisting entirely of wrappers, in which case we'll
				// take an outermost wrapper over nothing.
				continue
			}

			name := sf.name()
			file, line := u.fileLine(uf)
			if line == 0 {
				// Failure to symbolize
				continue
			}
			ctx.fn = &bytes(name)[0] // assume NUL-terminated
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - fi.entry()
			ctx.res = 1
			if u.isInlined(uf) {
				// Set ctx.pc to the "caller" so the race detector calls this again
				// to further unwind.
				uf = u.next(uf)
				ctx.pc = uf.pc
			}
			return
		}
	}
	// Symbolization failed: report placeholder name/file and the raw pc
	// as the offset, but still mark the context as filled in.
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
   212  
// symbolizeDataContext is the in/out parameter block for raceSymbolizeData.
// Like symbolizeCodeContext it is passed in from the C race runtime, so its
// layout presumably must match the C side — do not reorder fields (verify
// against the TSan runtime sources).
type symbolizeDataContext struct {
	addr  uintptr // in: address to look up
	heap  uintptr // out: set to 1 when addr is a heap object
	start uintptr // out: base address of the containing object
	size  uintptr // out: size of the containing object
	name  *byte   // not populated by raceSymbolizeData in this file
	file  *byte   // not populated by raceSymbolizeData in this file
	line  uintptr // not populated by raceSymbolizeData in this file
	res   uintptr // out: set to 1 when the context was populated
}
   223  
// raceSymbolizeData reads ctx.addr and, when it points into a heap object,
// fills in the object's base address and size.
func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		// TODO: Does this need to handle malloc headers?
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}
   233  
// Race runtime functions called via runtime·racecall.
// Each __tsan_* variable below is declared as a single byte but, via the
// //go:linkname + //go:cgo_import_static pairs, resolves to the entry point
// of the corresponding C function in the TSan runtime; racecall takes the
// address of such a variable as its fn argument.
//
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
   328  
// start/end of global data (data+bss).
// Set once by raceinit; consulted by isvalidaddr to decide whether an
// address has race shadow.
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
// Grown monotonically by racemapshadow as the heap arena expands.
var racearenastart uintptr
var racearenaend uintptr
   336  
// Entry points implemented outside Go (assembly); they forward to the
// C race runtime. Bodyless declarations only.
func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from C race runtime
// with up to 4 uintptr arguments.
// fn is the address of one of the __tsan_* byte variables above.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
   351  
   352  // checks if the address has shadow (i.e. heap or data/bss).
   353  //
   354  //go:nosplit
   355  func isvalidaddr(addr unsafe.Pointer) bool {
   356  	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
   357  		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
   358  }
   359  
// raceinit initializes the race detector, returning the race contexts for
// the initial goroutine (gctx) and initial P (pctx). It also maps race
// shadow for the global data segment and records its bounds.
//
//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// On most machines, cgo is required to initialize libc, which is used by race runtime.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")
	}

	// Start TSan, registering racecallbackthunk as the C->Go callback.
	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round data segment to page boundaries, because it's used in mmap().
	// Compute [start, end) as the hull of the first module's data, bss,
	// noptrdata, and noptrbss sections.
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	// Map shadow for the data segment and remember its bounds for
	// isvalidaddr.
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}
   405  
// racefini shuts down the race detector; __tsan_fini reports the final
// status and exits the process, so this function does not return to a
// normal caller. Note raceFiniLock is intentionally never unlocked.
//
//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)

	// __tsan_fini will run C atexit functions and C++ destructors,
	// which can theoretically call back into Go.
	// Tell the scheduler we entering external code.
	entersyscall()

	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)

	racecall(&__tsan_fini, 0, 0, 0, 0)
}
   426  
   427  //go:nosplit
   428  func raceproccreate() uintptr {
   429  	var ctx uintptr
   430  	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
   431  	return ctx
   432  }
   433  
// raceprocdestroy releases a TSan processor context previously returned
// by raceproccreate.
//
//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}
   438  
   439  //go:nosplit
   440  func racemapshadow(addr unsafe.Pointer, size uintptr) {
   441  	if racearenastart == 0 {
   442  		racearenastart = uintptr(addr)
   443  	}
   444  	if racearenaend < uintptr(addr)+size {
   445  		racearenaend = uintptr(addr) + size
   446  	}
   447  	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
   448  }
   449  
// racemalloc informs the race detector that the sz bytes at p were allocated.
//
//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

// racefree informs the race detector that the sz bytes at p were freed.
//
//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}
   459  
   460  //go:nosplit
   461  func racegostart(pc uintptr) uintptr {
   462  	gp := getg()
   463  	var spawng *g
   464  	if gp.m.curg != nil {
   465  		spawng = gp.m.curg
   466  	} else {
   467  		spawng = gp
   468  	}
   469  
   470  	var racectx uintptr
   471  	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
   472  	return racectx
   473  }
   474  
// racegoend records the end of the current goroutine.
//
//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

// racectxend is like racegoend but for an explicitly supplied race context.
//
//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}
   484  
// racewriterangepc records a write of the sz bytes at addr.
// When callpc != 0 the access is bracketed by racefuncenter/racefuncexit
// so the report attributes it to the instrumented caller.
//
//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}
   501  
// racereadrangepc records a read of the sz bytes at addr.
// See racewriterangepc for the callpc bracketing behavior.
//
//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}
   518  
// raceacquire performs an acquire operation on addr for the current goroutine.
//
//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}
   523  
   524  //go:nosplit
   525  func raceacquireg(gp *g, addr unsafe.Pointer) {
   526  	if getg().raceignore != 0 || !isvalidaddr(addr) {
   527  		return
   528  	}
   529  	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
   530  }
   531  
   532  //go:nosplit
   533  func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
   534  	if !isvalidaddr(addr) {
   535  		return
   536  	}
   537  	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
   538  }
   539  
// racerelease performs a release operation on addr for the current goroutine.
//
//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}
   544  
   545  //go:nosplit
   546  func racereleaseg(gp *g, addr unsafe.Pointer) {
   547  	if getg().raceignore != 0 || !isvalidaddr(addr) {
   548  		return
   549  	}
   550  	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
   551  }
   552  
// racereleaseacquire performs a combined release+acquire operation on addr
// for the current goroutine.
//
//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}
   557  
   558  //go:nosplit
   559  func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
   560  	if getg().raceignore != 0 || !isvalidaddr(addr) {
   561  		return
   562  	}
   563  	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
   564  }
   565  
// racereleasemerge performs a merged release operation on addr for the
// current goroutine (see RaceReleaseMerge).
//
//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}
   570  
   571  //go:nosplit
   572  func racereleasemergeg(gp *g, addr unsafe.Pointer) {
   573  	if getg().raceignore != 0 || !isvalidaddr(addr) {
   574  		return
   575  	}
   576  	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
   577  }
   578  
// racefingo marks the current goroutine as the finalizer goroutine to
// the race detector.
//
//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
   583  
// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package. Each //go:linkname exposes the local assembly implementation
// under the corresponding sync/atomic name.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
   656  

View as plain text