Source file src/runtime/metrics.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // Metrics implementation exported to runtime/metrics.
     8  
     9  import (
    10  	"internal/godebugs"
    11  	"internal/runtime/atomic"
    12  	"internal/runtime/gc"
    13  	"unsafe"
    14  )
    15  
var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	// sizeClassBuckets and timeHistBuckets hold the bucket boundaries
	// used by the histogram-valued metrics (allocation size classes and
	// time histograms, respectively). Both are built once in initMetrics
	// and are read-only thereafter.
	sizeClassBuckets []float64
	timeHistBuckets  []float64
)
    30  
// metricData describes how to sample a single runtime/metrics metric:
// which statistics groups it needs and how to derive its value from them.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure. Only the stat
	// groups listed in deps are guaranteed to be valid in the input.
	compute func(in *statAggregate, out *metricValue)
}
    41  
// metricsLock acquires metricsSema, which protects the metrics map and
// its initialization state. Pair with metricsUnlock.
func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		// Model the semaphore as a lock for the race detector; must
		// happen after the semaphore is actually acquired.
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}
    51  
// metricsUnlock releases metricsSema, previously acquired via metricsLock.
func metricsUnlock() {
	if raceenabled {
		// Record the release for the race detector before the
		// semaphore is actually released.
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}
    58  
// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	// Build the size-class histogram bucket boundaries. Capacity is one
	// extra for the +Inf bucket appended below.
	sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < gc.NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49)). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
	}
	// Final open-ended bucket; large objects land here.
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()
	// Each entry maps a runtime/metrics name to the stat groups it needs
	// (deps) and a function that extracts its value from a statAggregate.
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(NumCgoCall())
			},
		},
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.GCTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.IdleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
			},
		},
		"/gc/cleanups/executed:cleanups": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.cleanupsExecuted
			},
		},
		"/gc/cleanups/queued:cleanups": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.cleanupsQueued
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone
			},
		},
		"/gc/finalizers/executed:finalizers": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.finalizersExecuted
			},
		},
		"/gc/finalizers/queued:finalizers": {
			deps: makeStatDepSet(finalStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.finalStats.finalizersQueued
			},
		},
		"/gc/scan/globals:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.globalsScan
			},
		},
		"/gc/scan/heap:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.heapScan
			},
		},
		"/gc/scan/stack:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.stackScan
			},
		},
		"/gc/scan/total:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.gcStats.totalScan
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				// Large objects go in the last (+Inf) bucket.
				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallAllocCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				// Large objects go in the last (+Inf) bucket.
				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallFreeCount[1:] {
					hist.counts[i] = count
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.heapGoal
			},
		},
		"/gc/gomemlimit:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.memoryLimit.Load())
			},
		},
		"/gc/gogc:percent": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcController.gcPercent.Load())
			},
		},
		"/gc/heap/live:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = gcController.heapMarked
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.tinyAllocCount
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				// N.B. this is identical to /sched/pauses/total/gc:seconds.
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
					in.heapStats.inStacks - in.heapStats.inWorkBufs)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inWorkBufs) + in.sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
					in.sysStats.stacksSys + in.sysStats.mSpanSys +
					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
					in.sysStats.gcMiscSys + in.sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gTotal
			},
		},
		"/sched/goroutines/not-in-go:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gNonGo
			},
		},
		"/sched/goroutines/running:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gRunning
			},
		},
		"/sched/goroutines/runnable:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gRunnable
			},
		},
		"/sched/goroutines/waiting:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gWaiting
			},
		},
		"/sched/goroutines-created:goroutines": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.gCreated
			},
		},
		"/sched/latencies:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.timeToRun.write(out)
			},
		},
		"/sched/pauses/stopping/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeGC.write(out)
			},
		},
		"/sched/pauses/stopping/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwStoppingTimeOther.write(out)
			},
		},
		"/sched/pauses/total/gc:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeGC.write(out)
			},
		},
		"/sched/pauses/total/other:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				sched.stwTotalTimeOther.write(out)
			},
		},
		"/sched/threads/total:threads": {
			deps: makeStatDepSet(schedStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.schedStats.threads
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
			},
		},
	}

	// Seed a metric for every non-opaque GODEBUG setting with an
	// always-zero compute; godebug_registerMetric replaces it with a
	// real reader when internal/godebug registers one.
	for _, info := range godebugs.All {
		if !info.Opaque {
			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
		}
	}

	metricsInit = true
}
   558  
   559  func compute0(_ *statAggregate, out *metricValue) {
   560  	out.kind = metricKindUint64
   561  	out.scalar = 0
   562  }
   563  
   564  type metricReader func() uint64
   565  
   566  func (f metricReader) compute(_ *statAggregate, out *metricValue) {
   567  	out.kind = metricKindUint64
   568  	out.scalar = f()
   569  }
   570  
// godebug_registerMetric installs read as the compute function for the
// metric with the given name, which must already exist in the metrics
// map (typically a /godebug/... entry seeded with compute0 by
// initMetrics). Registering an unknown name is a fatal runtime error.
//
//go:linkname godebug_registerMetric internal/godebug.registerMetric
func godebug_registerMetric(name string, read func() uint64) {
	metricsLock()
	initMetrics()
	d, ok := metrics[name]
	if !ok {
		throw("runtime: unexpected metric registration for " + name)
	}
	d.compute = metricReader(read).compute
	metrics[name] = d
	metricsUnlock()
}
   583  
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep  statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                  // corresponds to sysStatsAggregate
	cpuStatsDep                  // corresponds to cpuStatsAggregate
	gcStatsDep                   // corresponds to gcStatsAggregate
	finalStatsDep                // corresponds to finalStatsAggregate
	schedStatsDep                // corresponds to schedStatsAggregate
	numStatsDeps                 // number of dependency kinds; must be last
)
   597  
// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap: each statDep occupies one bit, so a
// single uint64 word is sufficient while numStatsDeps <= 64.
type statDepSet [1]uint64
   602  
   603  // makeStatDepSet creates a new statDepSet from a list of statDeps.
   604  func makeStatDepSet(deps ...statDep) statDepSet {
   605  	var s statDepSet
   606  	for _, d := range deps {
   607  		s[d/64] |= 1 << (d % 64)
   608  	}
   609  	return s
   610  }
   611  
   612  // difference returns set difference of s from b as a new set.
   613  func (s statDepSet) difference(b statDepSet) statDepSet {
   614  	var c statDepSet
   615  	for i := range s {
   616  		c[i] = s[i] &^ b[i]
   617  	}
   618  	return c
   619  }
   620  
   621  // union returns the union of the two sets as a new set.
   622  func (s statDepSet) union(b statDepSet) statDepSet {
   623  	var c statDepSet
   624  	for i := range s {
   625  		c[i] = s[i] | b[i]
   626  	}
   627  	return c
   628  }
   629  
   630  // empty returns true if there are no dependencies in the set.
   631  func (s *statDepSet) empty() bool {
   632  	for _, c := range s {
   633  		if c != 0 {
   634  			return false
   635  		}
   636  	}
   637  	return true
   638  }
   639  
   640  // has returns true if the set contains a given statDep.
   641  func (s *statDepSet) has(d statDep) bool {
   642  	return s[d/64]&(1<<(d%64)) != 0
   643  }
   644  
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta // raw per-P deltas, read in compute

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by objects,
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}
   677  
// compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute() {
	// Read the accumulated (P-sharded) heap stats into the embedded delta.
	memstats.heapStats.read(&a.heapStatsDelta)

	// Calculate derived stats. Start with the large-object counts and
	// then fold in every small size class.
	a.totalAllocs = a.largeAllocCount
	a.totalFrees = a.largeFreeCount
	a.totalAllocated = a.largeAlloc
	a.totalFreed = a.largeFree
	for i := range a.smallAllocCount {
		na := a.smallAllocCount[i]
		nf := a.smallFreeCount[i]
		a.totalAllocs += na
		a.totalFrees += nf
		a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
		a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
	}
	// Live object bytes/counts are the difference between lifetime
	// allocations and frees.
	a.inObjects = a.totalAllocated - a.totalFreed
	a.numObjects = a.totalAllocs - a.totalFrees
}
   698  
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because of
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64 // memstats.stacks_sys
	mSpanSys       uint64 // memstats.mspan_sys
	mSpanInUse     uint64 // mheap_.spanalloc.inuse
	mCacheSys      uint64 // memstats.mcache_sys
	mCacheInUse    uint64 // mheap_.cachealloc.inuse
	buckHashSys    uint64 // memstats.buckhash_sys
	gcMiscSys      uint64 // memstats.gcMiscSys
	otherSys       uint64 // memstats.other_sys
	heapGoal       uint64 // gcController.heapGoal()
	gcCyclesDone   uint64 // memstats.numgc
	gcCyclesForced uint64 // memstats.numforcedgc
}
   719  
// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	// The mspan/mcache stats are read with the heap lock held; do that
	// on the system stack.
	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}
   739  
// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats // snapshot of work.cpuStats; see compute
}
   745  
// compute populates the cpuStatsAggregate with values from the runtime.
// The snapshot comes from the last stop-the-world; it is not updated
// to the present moment (see TODO below).
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
	// TODO(mknyszek): Update the CPU stats again so that we're not
	// just relying on the STW snapshot. The issue here is that currently
	// this will cause non-monotonicity in the "user" CPU time metric.
	//
	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}
   755  
// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	heapScan    uint64 // gcController.heapScan
	stackScan   uint64 // gcController.lastStackScan
	globalsScan uint64 // gcController.globalsScan
	totalScan   uint64 // sum of the three above
}
   764  
// compute populates the gcStatsAggregate with values from the runtime.
func (a *gcStatsAggregate) compute() {
	a.heapScan = gcController.heapScan.Load()
	a.stackScan = gcController.lastStackScan.Load()
	a.globalsScan = gcController.globalsScan.Load()
	// Derived total; computed from the loads above rather than re-read.
	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
}
   772  
// finalStatsAggregate represents various finalizer/cleanup stats obtained
// from the runtime acquired together to avoid skew and inconsistencies.
type finalStatsAggregate struct {
	finalizersQueued   uint64 // from finReadQueueStats
	finalizersExecuted uint64 // from finReadQueueStats
	cleanupsQueued     uint64 // from gcCleanups.readQueueStats
	cleanupsExecuted   uint64 // from gcCleanups.readQueueStats
}
   781  
// compute populates the finalStatsAggregate with values from the runtime.
func (a *finalStatsAggregate) compute() {
	a.finalizersQueued, a.finalizersExecuted = finReadQueueStats()
	a.cleanupsQueued, a.cleanupsExecuted = gcCleanups.readQueueStats()
}
   787  
// schedStatsAggregate contains stats about the scheduler, including
// an approximate count of goroutines in each state. The counts are
// approximate because they are collected without stopping the world;
// see (*schedStatsAggregate).compute.
type schedStatsAggregate struct {
	gTotal    uint64 // total goroutine count
	gRunning  uint64 // goroutines running Go code
	gRunnable uint64 // goroutines on run queues
	gNonGo    uint64 // goroutines outside Go (e.g. in syscalls)
	gWaiting  uint64 // blocked goroutines (derived)
	gCreated  uint64 // goroutines created over program lifetime
	threads   uint64 // threads owned by Go
}
   799  
   800  // compute populates the schedStatsAggregate with values from the runtime.
   801  func (a *schedStatsAggregate) compute() {
   802  	// Lock the scheduler so the global run queue can't change and
   803  	// the number of Ps can't change. This doesn't prevent the
   804  	// local run queues from changing, so the results are still
   805  	// approximate.
   806  	lock(&sched.lock)
   807  
   808  	// The total count of threads owned by Go is the number of Ms
   809  	// minus extra Ms on the list or in use.
   810  	a.threads = uint64(mcount()) - uint64(extraMInUse.Load()) - uint64(extraMLength.Load())
   811  
   812  	// Collect running/runnable from per-P run queues.
   813  	a.gCreated += sched.goroutinesCreated.Load()
   814  	for _, p := range allp {
   815  		if p == nil || p.status == _Pdead {
   816  			break
   817  		}
   818  		a.gCreated += p.goroutinesCreated
   819  		switch p.status {
   820  		case _Prunning:
   821  			if thread, ok := setBlockOnExitSyscall(p); ok {
   822  				thread.resume()
   823  				a.gNonGo++
   824  			} else {
   825  				a.gRunning++
   826  			}
   827  		case _Pgcstop:
   828  			// The world is stopping or stopped.
   829  			// This is fine. The results will be
   830  			// slightly odd since nothing else
   831  			// is running, but it will be accurate.
   832  		}
   833  
   834  		for {
   835  			h := atomic.Load(&p.runqhead)
   836  			t := atomic.Load(&p.runqtail)
   837  			next := atomic.Loaduintptr((*uintptr)(&p.runnext))
   838  			runnable := int32(t - h)
   839  			if atomic.Load(&p.runqhead) != h || runnable < 0 {
   840  				continue
   841  			}
   842  			if next != 0 {
   843  				runnable++
   844  			}
   845  			a.gRunnable += uint64(runnable)
   846  			break
   847  		}
   848  	}
   849  
   850  	// Global run queue.
   851  	a.gRunnable += uint64(sched.runq.size)
   852  
   853  	// Account for Gs that are in _Gsyscall without a P.
   854  	nGsyscallNoP := sched.nGsyscallNoP.Load()
   855  
   856  	// nGsyscallNoP can go negative during temporary races.
   857  	if nGsyscallNoP >= 0 {
   858  		a.gNonGo += uint64(nGsyscallNoP)
   859  	}
   860  
   861  	// Compute the number of blocked goroutines. We have to
   862  	// include system goroutines in this count because we included
   863  	// them above.
   864  	a.gTotal = uint64(gcount(true))
   865  	a.gWaiting = a.gTotal - (a.gRunning + a.gRunnable + a.gNonGo)
   866  	if a.gWaiting < 0 {
   867  		a.gWaiting = 0
   868  	}
   869  
   870  	unlock(&sched.lock)
   871  }
   872  
   873  // nsToSec takes a duration in nanoseconds and converts it to seconds as
   874  // a float64.
   875  func nsToSec(ns int64) float64 {
   876  	return float64(ns) / 1e9
   877  }
   878  
// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	ensured    statDepSet // which of the aggregates below have been computed.
	heapStats  heapStatsAggregate
	sysStats   sysStatsAggregate
	cpuStats   cpuStatsAggregate
	gcStats    gcStatsAggregate
	finalStats finalStatsAggregate
	schedStats schedStatsAggregate
}
   893  
   894  // ensure populates statistics aggregates determined by deps if they
   895  // haven't yet been populated.
   896  func (a *statAggregate) ensure(deps *statDepSet) {
   897  	missing := deps.difference(a.ensured)
   898  	if missing.empty() {
   899  		return
   900  	}
   901  	for i := statDep(0); i < numStatsDeps; i++ {
   902  		if !missing.has(i) {
   903  			continue
   904  		}
   905  		switch i {
   906  		case heapStatsDep:
   907  			a.heapStats.compute()
   908  		case sysStatsDep:
   909  			a.sysStats.compute()
   910  		case cpuStatsDep:
   911  			a.cpuStats.compute()
   912  		case gcStatsDep:
   913  			a.gcStats.compute()
   914  		case finalStatsDep:
   915  			a.finalStats.compute()
   916  		case schedStatsDep:
   917  			a.schedStats.compute()
   918  		}
   919  	}
   920  	a.ensured = a.ensured.union(missing)
   921  }
   922  
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota // set when a sample's metric name is unknown.
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)
   935  
// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string      // metric key this sample refers to.
	value metricValue // sampled value, filled in by readMetrics.
}
   942  
// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
// (Previously this comment said runtime/metrics.Sample, but the
// kind/scalar/pointer layout here mirrors Value, not Sample.)
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
   950  
   951  // float64HistOrInit tries to pull out an existing float64Histogram
   952  // from the value, but if none exists, then it allocates one with
   953  // the given buckets.
   954  func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
   955  	var hist *metricFloat64Histogram
   956  	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
   957  		hist = (*metricFloat64Histogram)(v.pointer)
   958  	} else {
   959  		v.kind = metricKindFloat64Histogram
   960  		hist = new(metricFloat64Histogram)
   961  		v.pointer = unsafe.Pointer(hist)
   962  	}
   963  	hist.buckets = buckets
   964  	if len(hist.counts) != len(hist.buckets)-1 {
   965  		hist.counts = make([]uint64, len(buckets)-1)
   966  	}
   967  	return hist
   968  }
   969  
// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64  // bucket counts; float64HistOrInit keeps len(counts) == len(buckets)-1.
	buckets []float64 // bucket boundaries.
}
   976  
// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate

// metricName pairs a metric key with the kind of value it reports.
type metricName struct {
	name string     // metric key (a key of the metrics map).
	kind metricKind // value kind the metric produces.
}
   988  
   989  // readMetricNames is the implementation of runtime/metrics.readMetricNames,
   990  // used by the runtime/metrics test and otherwise unreferenced.
   991  //
   992  //go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
   993  func readMetricNames() []string {
   994  	metricsLock()
   995  	initMetrics()
   996  	n := len(metrics)
   997  	metricsUnlock()
   998  
   999  	list := make([]string, 0, n)
  1000  
  1001  	metricsLock()
  1002  	for name := range metrics {
  1003  		list = append(list, name)
  1004  	}
  1005  	metricsUnlock()
  1006  
  1007  	return list
  1008  }
  1009  
// readMetrics is the implementation of runtime/metrics.Read.
//
// samplesp is the data pointer of the caller's sample slice, with
// len and cap its length and capacity; values are filled in place
// by readMetricsLocked.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	// Hold metricsSema for the whole read so every sample in this
	// call is computed from one statAggregate snapshot.
	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Read the metrics.
	readMetricsLocked(samplesp, len, cap)
	metricsUnlock()
}
  1023  
// readMetricsLocked is the internal, locked portion of readMetrics.
//
// Broken out for more robust testing. metricsLock must be held and
// initMetrics must have been called already.
func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	//
	// The caller passes the sample slice's components separately;
	// reassemble them into a []metricSample header via the runtime's
	// slice type.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))

	// Clear agg defensively: it is a reused global, and ensure()
	// only computes aggregates it believes are not yet populated.
	agg = statAggregate{}

	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			// Unknown metric name: mark just this sample as
			// bad rather than failing the whole read.
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)

		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}
}
  1052  

View as plain text