Source file src/runtime/metrics.go

     1  // Copyright 2020 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // Metrics implementation exported to runtime/metrics.
     8  
     9  import (
    10  	"internal/godebugs"
    11  	"internal/runtime/gc"
    12  	"unsafe"
    13  )
    14  
    15  var (
     16  	// metrics is a map of runtime/metrics keys to data used by the runtime
     17  	// to sample each metric's value. metricsInit indicates whether the map
     18  	// has been initialized.
     19  	//
     20  	// These fields are protected by metricsSema, which should be
     21  	// locked/unlocked with metricsLock() / metricsUnlock().
    22  	metricsSema uint32 = 1
    23  	metricsInit bool
    24  	metrics     map[string]metricData
    25  
    26  	sizeClassBuckets []float64
    27  	timeHistBuckets  []float64
    28  )
    29  
    30  type metricData struct {
     31  	// deps is the set of runtime statistics that this metric
     32  	// depends on. Before compute is called, the statAggregate
     33  	// that will be passed to it must ensure() these dependencies.
    34  	deps statDepSet
    35  
    36  	// compute is a function that populates a metricValue
    37  	// given a populated statAggregate structure.
    38  	compute func(in *statAggregate, out *metricValue)
    39  }
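
         // For illustration, a hypothetical float64 metric that depends only on
         // CPU statistics could be described by an entry like the following
         // (a sketch modeled on the entries built in initMetrics below, not a
         // real registered metric):
         //
         //	metricData{
         //		deps: makeStatDepSet(cpuStatsDep),
         //		compute: func(in *statAggregate, out *metricValue) {
         //			out.kind = metricKindFloat64
         //			out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
         //		},
         //	}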
    40  
    41  func metricsLock() {
    42  	// Acquire the metricsSema but with handoff. Operations are typically
    43  	// expensive enough that queueing up goroutines and handing off between
    44  	// them will be noticeably better-behaved.
    45  	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
    46  	if raceenabled {
    47  		raceacquire(unsafe.Pointer(&metricsSema))
    48  	}
    49  }
    50  
    51  func metricsUnlock() {
    52  	if raceenabled {
    53  		racerelease(unsafe.Pointer(&metricsSema))
    54  	}
    55  	semrelease(&metricsSema)
    56  }
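
         // Callers that need the metrics map bracket their access with this
         // pair and (re)initialize the map while holding the semaphore. A
         // sketch of the pattern used by godebug_registerMetric and
         // readMetrics below:
         //
         //	metricsLock()
         //	initMetrics()
         //	// ... read or update the metrics map ...
         //	metricsUnlock()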
    57  
    58  // initMetrics initializes the metrics map if it hasn't been yet.
    59  //
    60  // metricsSema must be held.
    61  func initMetrics() {
    62  	if metricsInit {
    63  		return
    64  	}
    65  
    66  	sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
     67  	// Skip size class 0, which is a stand-in for large objects; large
     68  	// objects are tracked separately (and they actually get placed in
     69  	// the last bucket, not the first).
    70  	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
    71  	for i := 1; i < gc.NumSizeClasses; i++ {
     72  		// Size classes have an inclusive upper bound
     73  		// and an exclusive lower bound (e.g. the 48-byte size class
     74  		// is (32, 48]) whereas we want an inclusive lower bound
     75  		// and an exclusive upper bound (e.g. the 48-byte size class
     76  		// is [33, 49)). We can achieve this by shifting all bucket
     77  		// boundaries up by 1.
     78  		//
     79  		// Also, a float64 can precisely represent integers with
     80  		// values up to 2^53, and size classes are relatively small
     81  		// (nowhere near 2^48 even), so this will give us exact
     82  		// boundaries.
    83  		sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
    84  	}
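         	// As a worked example of the boundary shift: the 48-byte size
         	// class covers allocations in (32, 48], so its bucket boundary
         	// becomes 49 and such an allocation is counted in [33, 49).
         	// Large objects land in the final [boundary, +Inf) bucket
         	// appended just below.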
    85  	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
    86  
    87  	timeHistBuckets = timeHistogramMetricsBuckets()
    88  	metrics = map[string]metricData{
    89  		"/cgo/go-to-c-calls:calls": {
    90  			compute: func(_ *statAggregate, out *metricValue) {
    91  				out.kind = metricKindUint64
    92  				out.scalar = uint64(NumCgoCall())
    93  			},
    94  		},
    95  		"/cpu/classes/gc/mark/assist:cpu-seconds": {
    96  			deps: makeStatDepSet(cpuStatsDep),
    97  			compute: func(in *statAggregate, out *metricValue) {
    98  				out.kind = metricKindFloat64
    99  				out.scalar = float64bits(nsToSec(in.cpuStats.GCAssistTime))
   100  			},
   101  		},
   102  		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
   103  			deps: makeStatDepSet(cpuStatsDep),
   104  			compute: func(in *statAggregate, out *metricValue) {
   105  				out.kind = metricKindFloat64
   106  				out.scalar = float64bits(nsToSec(in.cpuStats.GCDedicatedTime))
   107  			},
   108  		},
   109  		"/cpu/classes/gc/mark/idle:cpu-seconds": {
   110  			deps: makeStatDepSet(cpuStatsDep),
   111  			compute: func(in *statAggregate, out *metricValue) {
   112  				out.kind = metricKindFloat64
   113  				out.scalar = float64bits(nsToSec(in.cpuStats.GCIdleTime))
   114  			},
   115  		},
   116  		"/cpu/classes/gc/pause:cpu-seconds": {
   117  			deps: makeStatDepSet(cpuStatsDep),
   118  			compute: func(in *statAggregate, out *metricValue) {
   119  				out.kind = metricKindFloat64
   120  				out.scalar = float64bits(nsToSec(in.cpuStats.GCPauseTime))
   121  			},
   122  		},
   123  		"/cpu/classes/gc/total:cpu-seconds": {
   124  			deps: makeStatDepSet(cpuStatsDep),
   125  			compute: func(in *statAggregate, out *metricValue) {
   126  				out.kind = metricKindFloat64
   127  				out.scalar = float64bits(nsToSec(in.cpuStats.GCTotalTime))
   128  			},
   129  		},
   130  		"/cpu/classes/idle:cpu-seconds": {
   131  			deps: makeStatDepSet(cpuStatsDep),
   132  			compute: func(in *statAggregate, out *metricValue) {
   133  				out.kind = metricKindFloat64
   134  				out.scalar = float64bits(nsToSec(in.cpuStats.IdleTime))
   135  			},
   136  		},
   137  		"/cpu/classes/scavenge/assist:cpu-seconds": {
   138  			deps: makeStatDepSet(cpuStatsDep),
   139  			compute: func(in *statAggregate, out *metricValue) {
   140  				out.kind = metricKindFloat64
   141  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeAssistTime))
   142  			},
   143  		},
   144  		"/cpu/classes/scavenge/background:cpu-seconds": {
   145  			deps: makeStatDepSet(cpuStatsDep),
   146  			compute: func(in *statAggregate, out *metricValue) {
   147  				out.kind = metricKindFloat64
   148  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeBgTime))
   149  			},
   150  		},
   151  		"/cpu/classes/scavenge/total:cpu-seconds": {
   152  			deps: makeStatDepSet(cpuStatsDep),
   153  			compute: func(in *statAggregate, out *metricValue) {
   154  				out.kind = metricKindFloat64
   155  				out.scalar = float64bits(nsToSec(in.cpuStats.ScavengeTotalTime))
   156  			},
   157  		},
   158  		"/cpu/classes/total:cpu-seconds": {
   159  			deps: makeStatDepSet(cpuStatsDep),
   160  			compute: func(in *statAggregate, out *metricValue) {
   161  				out.kind = metricKindFloat64
   162  				out.scalar = float64bits(nsToSec(in.cpuStats.TotalTime))
   163  			},
   164  		},
   165  		"/cpu/classes/user:cpu-seconds": {
   166  			deps: makeStatDepSet(cpuStatsDep),
   167  			compute: func(in *statAggregate, out *metricValue) {
   168  				out.kind = metricKindFloat64
   169  				out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
   170  			},
   171  		},
   172  		"/gc/cleanups/executed:cleanups": {
   173  			deps: makeStatDepSet(finalStatsDep),
   174  			compute: func(in *statAggregate, out *metricValue) {
   175  				out.kind = metricKindUint64
   176  				out.scalar = in.finalStats.cleanupsExecuted
   177  			},
   178  		},
   179  		"/gc/cleanups/queued:cleanups": {
   180  			deps: makeStatDepSet(finalStatsDep),
   181  			compute: func(in *statAggregate, out *metricValue) {
   182  				out.kind = metricKindUint64
   183  				out.scalar = in.finalStats.cleanupsQueued
   184  			},
   185  		},
   186  		"/gc/cycles/automatic:gc-cycles": {
   187  			deps: makeStatDepSet(sysStatsDep),
   188  			compute: func(in *statAggregate, out *metricValue) {
   189  				out.kind = metricKindUint64
   190  				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
   191  			},
   192  		},
   193  		"/gc/cycles/forced:gc-cycles": {
   194  			deps: makeStatDepSet(sysStatsDep),
   195  			compute: func(in *statAggregate, out *metricValue) {
   196  				out.kind = metricKindUint64
   197  				out.scalar = in.sysStats.gcCyclesForced
   198  			},
   199  		},
   200  		"/gc/cycles/total:gc-cycles": {
   201  			deps: makeStatDepSet(sysStatsDep),
   202  			compute: func(in *statAggregate, out *metricValue) {
   203  				out.kind = metricKindUint64
   204  				out.scalar = in.sysStats.gcCyclesDone
   205  			},
   206  		},
   207  		"/gc/finalizers/executed:finalizers": {
   208  			deps: makeStatDepSet(finalStatsDep),
   209  			compute: func(in *statAggregate, out *metricValue) {
   210  				out.kind = metricKindUint64
   211  				out.scalar = in.finalStats.finalizersExecuted
   212  			},
   213  		},
   214  		"/gc/finalizers/queued:finalizers": {
   215  			deps: makeStatDepSet(finalStatsDep),
   216  			compute: func(in *statAggregate, out *metricValue) {
   217  				out.kind = metricKindUint64
   218  				out.scalar = in.finalStats.finalizersQueued
   219  			},
   220  		},
   221  		"/gc/scan/globals:bytes": {
   222  			deps: makeStatDepSet(gcStatsDep),
   223  			compute: func(in *statAggregate, out *metricValue) {
   224  				out.kind = metricKindUint64
   225  				out.scalar = in.gcStats.globalsScan
   226  			},
   227  		},
   228  		"/gc/scan/heap:bytes": {
   229  			deps: makeStatDepSet(gcStatsDep),
   230  			compute: func(in *statAggregate, out *metricValue) {
   231  				out.kind = metricKindUint64
   232  				out.scalar = in.gcStats.heapScan
   233  			},
   234  		},
   235  		"/gc/scan/stack:bytes": {
   236  			deps: makeStatDepSet(gcStatsDep),
   237  			compute: func(in *statAggregate, out *metricValue) {
   238  				out.kind = metricKindUint64
   239  				out.scalar = in.gcStats.stackScan
   240  			},
   241  		},
   242  		"/gc/scan/total:bytes": {
   243  			deps: makeStatDepSet(gcStatsDep),
   244  			compute: func(in *statAggregate, out *metricValue) {
   245  				out.kind = metricKindUint64
   246  				out.scalar = in.gcStats.totalScan
   247  			},
   248  		},
   249  		"/gc/heap/allocs-by-size:bytes": {
   250  			deps: makeStatDepSet(heapStatsDep),
   251  			compute: func(in *statAggregate, out *metricValue) {
   252  				hist := out.float64HistOrInit(sizeClassBuckets)
   253  				hist.counts[len(hist.counts)-1] = in.heapStats.largeAllocCount
   254  				// Cut off the first index which is ostensibly for size class 0,
   255  				// but large objects are tracked separately so it's actually unused.
   256  				for i, count := range in.heapStats.smallAllocCount[1:] {
   257  					hist.counts[i] = count
   258  				}
   259  			},
   260  		},
   261  		"/gc/heap/allocs:bytes": {
   262  			deps: makeStatDepSet(heapStatsDep),
   263  			compute: func(in *statAggregate, out *metricValue) {
   264  				out.kind = metricKindUint64
   265  				out.scalar = in.heapStats.totalAllocated
   266  			},
   267  		},
   268  		"/gc/heap/allocs:objects": {
   269  			deps: makeStatDepSet(heapStatsDep),
   270  			compute: func(in *statAggregate, out *metricValue) {
   271  				out.kind = metricKindUint64
   272  				out.scalar = in.heapStats.totalAllocs
   273  			},
   274  		},
   275  		"/gc/heap/frees-by-size:bytes": {
   276  			deps: makeStatDepSet(heapStatsDep),
   277  			compute: func(in *statAggregate, out *metricValue) {
   278  				hist := out.float64HistOrInit(sizeClassBuckets)
   279  				hist.counts[len(hist.counts)-1] = in.heapStats.largeFreeCount
   280  				// Cut off the first index which is ostensibly for size class 0,
   281  				// but large objects are tracked separately so it's actually unused.
   282  				for i, count := range in.heapStats.smallFreeCount[1:] {
   283  					hist.counts[i] = count
   284  				}
   285  			},
   286  		},
   287  		"/gc/heap/frees:bytes": {
   288  			deps: makeStatDepSet(heapStatsDep),
   289  			compute: func(in *statAggregate, out *metricValue) {
   290  				out.kind = metricKindUint64
   291  				out.scalar = in.heapStats.totalFreed
   292  			},
   293  		},
   294  		"/gc/heap/frees:objects": {
   295  			deps: makeStatDepSet(heapStatsDep),
   296  			compute: func(in *statAggregate, out *metricValue) {
   297  				out.kind = metricKindUint64
   298  				out.scalar = in.heapStats.totalFrees
   299  			},
   300  		},
   301  		"/gc/heap/goal:bytes": {
   302  			deps: makeStatDepSet(sysStatsDep),
   303  			compute: func(in *statAggregate, out *metricValue) {
   304  				out.kind = metricKindUint64
   305  				out.scalar = in.sysStats.heapGoal
   306  			},
   307  		},
   308  		"/gc/gomemlimit:bytes": {
   309  			compute: func(in *statAggregate, out *metricValue) {
   310  				out.kind = metricKindUint64
   311  				out.scalar = uint64(gcController.memoryLimit.Load())
   312  			},
   313  		},
   314  		"/gc/gogc:percent": {
   315  			compute: func(in *statAggregate, out *metricValue) {
   316  				out.kind = metricKindUint64
   317  				out.scalar = uint64(gcController.gcPercent.Load())
   318  			},
   319  		},
   320  		"/gc/heap/live:bytes": {
   321  			deps: makeStatDepSet(heapStatsDep),
   322  			compute: func(in *statAggregate, out *metricValue) {
   323  				out.kind = metricKindUint64
   324  				out.scalar = gcController.heapMarked
   325  			},
   326  		},
   327  		"/gc/heap/objects:objects": {
   328  			deps: makeStatDepSet(heapStatsDep),
   329  			compute: func(in *statAggregate, out *metricValue) {
   330  				out.kind = metricKindUint64
   331  				out.scalar = in.heapStats.numObjects
   332  			},
   333  		},
   334  		"/gc/heap/tiny/allocs:objects": {
   335  			deps: makeStatDepSet(heapStatsDep),
   336  			compute: func(in *statAggregate, out *metricValue) {
   337  				out.kind = metricKindUint64
   338  				out.scalar = in.heapStats.tinyAllocCount
   339  			},
   340  		},
   341  		"/gc/limiter/last-enabled:gc-cycle": {
   342  			compute: func(_ *statAggregate, out *metricValue) {
   343  				out.kind = metricKindUint64
   344  				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
   345  			},
   346  		},
   347  		"/gc/pauses:seconds": {
   348  			compute: func(_ *statAggregate, out *metricValue) {
   349  				// N.B. this is identical to /sched/pauses/total/gc:seconds.
   350  				sched.stwTotalTimeGC.write(out)
   351  			},
   352  		},
   353  		"/gc/stack/starting-size:bytes": {
   354  			compute: func(in *statAggregate, out *metricValue) {
   355  				out.kind = metricKindUint64
   356  				out.scalar = uint64(startingStackSize)
   357  			},
   358  		},
   359  		"/memory/classes/heap/free:bytes": {
   360  			deps: makeStatDepSet(heapStatsDep),
   361  			compute: func(in *statAggregate, out *metricValue) {
   362  				out.kind = metricKindUint64
   363  				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
   364  					in.heapStats.inStacks - in.heapStats.inWorkBufs)
   365  			},
   366  		},
   367  		"/memory/classes/heap/objects:bytes": {
   368  			deps: makeStatDepSet(heapStatsDep),
   369  			compute: func(in *statAggregate, out *metricValue) {
   370  				out.kind = metricKindUint64
   371  				out.scalar = in.heapStats.inObjects
   372  			},
   373  		},
   374  		"/memory/classes/heap/released:bytes": {
   375  			deps: makeStatDepSet(heapStatsDep),
   376  			compute: func(in *statAggregate, out *metricValue) {
   377  				out.kind = metricKindUint64
   378  				out.scalar = uint64(in.heapStats.released)
   379  			},
   380  		},
   381  		"/memory/classes/heap/stacks:bytes": {
   382  			deps: makeStatDepSet(heapStatsDep),
   383  			compute: func(in *statAggregate, out *metricValue) {
   384  				out.kind = metricKindUint64
   385  				out.scalar = uint64(in.heapStats.inStacks)
   386  			},
   387  		},
   388  		"/memory/classes/heap/unused:bytes": {
   389  			deps: makeStatDepSet(heapStatsDep),
   390  			compute: func(in *statAggregate, out *metricValue) {
   391  				out.kind = metricKindUint64
   392  				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
   393  			},
   394  		},
   395  		"/memory/classes/metadata/mcache/free:bytes": {
   396  			deps: makeStatDepSet(sysStatsDep),
   397  			compute: func(in *statAggregate, out *metricValue) {
   398  				out.kind = metricKindUint64
   399  				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
   400  			},
   401  		},
   402  		"/memory/classes/metadata/mcache/inuse:bytes": {
   403  			deps: makeStatDepSet(sysStatsDep),
   404  			compute: func(in *statAggregate, out *metricValue) {
   405  				out.kind = metricKindUint64
   406  				out.scalar = in.sysStats.mCacheInUse
   407  			},
   408  		},
   409  		"/memory/classes/metadata/mspan/free:bytes": {
   410  			deps: makeStatDepSet(sysStatsDep),
   411  			compute: func(in *statAggregate, out *metricValue) {
   412  				out.kind = metricKindUint64
   413  				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
   414  			},
   415  		},
   416  		"/memory/classes/metadata/mspan/inuse:bytes": {
   417  			deps: makeStatDepSet(sysStatsDep),
   418  			compute: func(in *statAggregate, out *metricValue) {
   419  				out.kind = metricKindUint64
   420  				out.scalar = in.sysStats.mSpanInUse
   421  			},
   422  		},
   423  		"/memory/classes/metadata/other:bytes": {
   424  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
   425  			compute: func(in *statAggregate, out *metricValue) {
   426  				out.kind = metricKindUint64
   427  				out.scalar = uint64(in.heapStats.inWorkBufs) + in.sysStats.gcMiscSys
   428  			},
   429  		},
   430  		"/memory/classes/os-stacks:bytes": {
   431  			deps: makeStatDepSet(sysStatsDep),
   432  			compute: func(in *statAggregate, out *metricValue) {
   433  				out.kind = metricKindUint64
   434  				out.scalar = in.sysStats.stacksSys
   435  			},
   436  		},
   437  		"/memory/classes/other:bytes": {
   438  			deps: makeStatDepSet(sysStatsDep),
   439  			compute: func(in *statAggregate, out *metricValue) {
   440  				out.kind = metricKindUint64
   441  				out.scalar = in.sysStats.otherSys
   442  			},
   443  		},
   444  		"/memory/classes/profiling/buckets:bytes": {
   445  			deps: makeStatDepSet(sysStatsDep),
   446  			compute: func(in *statAggregate, out *metricValue) {
   447  				out.kind = metricKindUint64
   448  				out.scalar = in.sysStats.buckHashSys
   449  			},
   450  		},
   451  		"/memory/classes/total:bytes": {
   452  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
   453  			compute: func(in *statAggregate, out *metricValue) {
   454  				out.kind = metricKindUint64
   455  				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
   456  					in.sysStats.stacksSys + in.sysStats.mSpanSys +
   457  					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
   458  					in.sysStats.gcMiscSys + in.sysStats.otherSys
   459  			},
   460  		},
   461  		"/sched/gomaxprocs:threads": {
   462  			compute: func(_ *statAggregate, out *metricValue) {
   463  				out.kind = metricKindUint64
   464  				out.scalar = uint64(gomaxprocs)
   465  			},
   466  		},
   467  		"/sched/goroutines:goroutines": {
   468  			compute: func(_ *statAggregate, out *metricValue) {
   469  				out.kind = metricKindUint64
   470  				out.scalar = uint64(gcount())
   471  			},
   472  		},
   473  		"/sched/latencies:seconds": {
   474  			compute: func(_ *statAggregate, out *metricValue) {
   475  				sched.timeToRun.write(out)
   476  			},
   477  		},
   478  		"/sched/pauses/stopping/gc:seconds": {
   479  			compute: func(_ *statAggregate, out *metricValue) {
   480  				sched.stwStoppingTimeGC.write(out)
   481  			},
   482  		},
   483  		"/sched/pauses/stopping/other:seconds": {
   484  			compute: func(_ *statAggregate, out *metricValue) {
   485  				sched.stwStoppingTimeOther.write(out)
   486  			},
   487  		},
   488  		"/sched/pauses/total/gc:seconds": {
   489  			compute: func(_ *statAggregate, out *metricValue) {
   490  				sched.stwTotalTimeGC.write(out)
   491  			},
   492  		},
   493  		"/sched/pauses/total/other:seconds": {
   494  			compute: func(_ *statAggregate, out *metricValue) {
   495  				sched.stwTotalTimeOther.write(out)
   496  			},
   497  		},
   498  		"/sync/mutex/wait/total:seconds": {
   499  			compute: func(_ *statAggregate, out *metricValue) {
   500  				out.kind = metricKindFloat64
   501  				out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos()))
   502  			},
   503  		},
   504  	}
   505  
   506  	for _, info := range godebugs.All {
   507  		if !info.Opaque {
   508  			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
   509  		}
   510  	}
   511  
   512  	metricsInit = true
   513  }
   514  
   515  func compute0(_ *statAggregate, out *metricValue) {
   516  	out.kind = metricKindUint64
   517  	out.scalar = 0
   518  }
   519  
   520  type metricReader func() uint64
   521  
   522  func (f metricReader) compute(_ *statAggregate, out *metricValue) {
   523  	out.kind = metricKindUint64
   524  	out.scalar = f()
   525  }
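
         // For illustration, a plain reader function is adapted into a compute
         // function like so (a sketch of what godebug_registerMetric does
         // below; the literal reader here is hypothetical):
         //
         //	var d metricData
         //	d.compute = metricReader(func() uint64 { return 0 }).compute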
   526  
   527  //go:linkname godebug_registerMetric internal/godebug.registerMetric
   528  func godebug_registerMetric(name string, read func() uint64) {
   529  	metricsLock()
   530  	initMetrics()
   531  	d, ok := metrics[name]
   532  	if !ok {
   533  		throw("runtime: unexpected metric registration for " + name)
   534  	}
   535  	d.compute = metricReader(read).compute
   536  	metrics[name] = d
   537  	metricsUnlock()
   538  }
   539  
   540  // statDep is a dependency on a group of statistics
   541  // that a metric might have.
   542  type statDep uint
   543  
   544  const (
   545  	heapStatsDep  statDep = iota // corresponds to heapStatsAggregate
   546  	sysStatsDep                  // corresponds to sysStatsAggregate
   547  	cpuStatsDep                  // corresponds to cpuStatsAggregate
   548  	gcStatsDep                   // corresponds to gcStatsAggregate
   549  	finalStatsDep                // corresponds to finalStatsAggregate
   550  	numStatsDeps
   551  )
   552  
   553  // statDepSet represents a set of statDeps.
   554  //
   555  // Under the hood, it's a bitmap.
   556  type statDepSet [1]uint64
   557  
   558  // makeStatDepSet creates a new statDepSet from a list of statDeps.
   559  func makeStatDepSet(deps ...statDep) statDepSet {
   560  	var s statDepSet
   561  	for _, d := range deps {
   562  		s[d/64] |= 1 << (d % 64)
   563  	}
   564  	return s
   565  }
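
         // For example, makeStatDepSet(heapStatsDep, sysStatsDep) returns a set
         // whose single uint64 word has bits 0 and 1 set, since heapStatsDep
         // and sysStatsDep are 0 and 1 in the iota block above.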
   566  
    567  // difference returns the set difference of s and b (the elements of s not in b) as a new set.
   568  func (s statDepSet) difference(b statDepSet) statDepSet {
   569  	var c statDepSet
   570  	for i := range s {
   571  		c[i] = s[i] &^ b[i]
   572  	}
   573  	return c
   574  }
   575  
   576  // union returns the union of the two sets as a new set.
   577  func (s statDepSet) union(b statDepSet) statDepSet {
   578  	var c statDepSet
   579  	for i := range s {
   580  		c[i] = s[i] | b[i]
   581  	}
   582  	return c
   583  }
   584  
   585  // empty returns true if there are no dependencies in the set.
   586  func (s *statDepSet) empty() bool {
   587  	for _, c := range s {
   588  		if c != 0 {
   589  			return false
   590  		}
   591  	}
   592  	return true
   593  }
   594  
   595  // has returns true if the set contains a given statDep.
   596  func (s *statDepSet) has(d statDep) bool {
   597  	return s[d/64]&(1<<(d%64)) != 0
   598  }
   599  
   600  // heapStatsAggregate represents memory stats obtained from the
   601  // runtime. This set of stats is grouped together because they
   602  // depend on each other in some way to make sense of the runtime's
   603  // current heap memory use. They're also sharded across Ps, so it
   604  // makes sense to grab them all at once.
   605  type heapStatsAggregate struct {
   606  	heapStatsDelta
   607  
   608  	// Derived from values in heapStatsDelta.
   609  
    610  	// inObjects is the bytes of memory occupied by live objects.
   611  	inObjects uint64
   612  
   613  	// numObjects is the number of live objects in the heap.
   614  	numObjects uint64
   615  
   616  	// totalAllocated is the total bytes of heap objects allocated
   617  	// over the lifetime of the program.
   618  	totalAllocated uint64
   619  
   620  	// totalFreed is the total bytes of heap objects freed
   621  	// over the lifetime of the program.
   622  	totalFreed uint64
   623  
   624  	// totalAllocs is the number of heap objects allocated over
   625  	// the lifetime of the program.
   626  	totalAllocs uint64
   627  
   628  	// totalFrees is the number of heap objects freed over
   629  	// the lifetime of the program.
   630  	totalFrees uint64
   631  }
   632  
   633  // compute populates the heapStatsAggregate with values from the runtime.
   634  func (a *heapStatsAggregate) compute() {
   635  	memstats.heapStats.read(&a.heapStatsDelta)
   636  
   637  	// Calculate derived stats.
   638  	a.totalAllocs = a.largeAllocCount
   639  	a.totalFrees = a.largeFreeCount
   640  	a.totalAllocated = a.largeAlloc
   641  	a.totalFreed = a.largeFree
   642  	for i := range a.smallAllocCount {
   643  		na := a.smallAllocCount[i]
   644  		nf := a.smallFreeCount[i]
   645  		a.totalAllocs += na
   646  		a.totalFrees += nf
   647  		a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
   648  		a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
   649  	}
   650  	a.inObjects = a.totalAllocated - a.totalFreed
   651  	a.numObjects = a.totalAllocs - a.totalFrees
   652  }
   653  
   654  // sysStatsAggregate represents system memory stats obtained
   655  // from the runtime. This set of stats is grouped together because
   656  // they're all relatively cheap to acquire and generally independent
   657  // of one another and other runtime memory stats. The fact that they
   658  // may be acquired at different times, especially with respect to
    659  // heapStatsAggregate, means there could be some skew, but because
   660  // these stats are independent, there's no real consistency issue here.
   661  type sysStatsAggregate struct {
   662  	stacksSys      uint64
   663  	mSpanSys       uint64
   664  	mSpanInUse     uint64
   665  	mCacheSys      uint64
   666  	mCacheInUse    uint64
   667  	buckHashSys    uint64
   668  	gcMiscSys      uint64
   669  	otherSys       uint64
   670  	heapGoal       uint64
   671  	gcCyclesDone   uint64
   672  	gcCyclesForced uint64
   673  }
   674  
   675  // compute populates the sysStatsAggregate with values from the runtime.
   676  func (a *sysStatsAggregate) compute() {
   677  	a.stacksSys = memstats.stacks_sys.load()
   678  	a.buckHashSys = memstats.buckhash_sys.load()
   679  	a.gcMiscSys = memstats.gcMiscSys.load()
   680  	a.otherSys = memstats.other_sys.load()
   681  	a.heapGoal = gcController.heapGoal()
   682  	a.gcCyclesDone = uint64(memstats.numgc)
   683  	a.gcCyclesForced = uint64(memstats.numforcedgc)
   684  
   685  	systemstack(func() {
   686  		lock(&mheap_.lock)
   687  		a.mSpanSys = memstats.mspan_sys.load()
   688  		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
   689  		a.mCacheSys = memstats.mcache_sys.load()
   690  		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
   691  		unlock(&mheap_.lock)
   692  	})
   693  }
   694  
   695  // cpuStatsAggregate represents CPU stats obtained from the runtime
   696  // acquired together to avoid skew and inconsistencies.
   697  type cpuStatsAggregate struct {
   698  	cpuStats
   699  }
   700  
   701  // compute populates the cpuStatsAggregate with values from the runtime.
   702  func (a *cpuStatsAggregate) compute() {
   703  	a.cpuStats = work.cpuStats
   704  	// TODO(mknyszek): Update the CPU stats again so that we're not
   705  	// just relying on the STW snapshot. The issue here is that currently
   706  	// this will cause non-monotonicity in the "user" CPU time metric.
   707  	//
   708  	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
   709  }
   710  
   711  // gcStatsAggregate represents various GC stats obtained from the runtime
   712  // acquired together to avoid skew and inconsistencies.
   713  type gcStatsAggregate struct {
   714  	heapScan    uint64
   715  	stackScan   uint64
   716  	globalsScan uint64
   717  	totalScan   uint64
   718  }
   719  
   720  // compute populates the gcStatsAggregate with values from the runtime.
   721  func (a *gcStatsAggregate) compute() {
   722  	a.heapScan = gcController.heapScan.Load()
   723  	a.stackScan = gcController.lastStackScan.Load()
   724  	a.globalsScan = gcController.globalsScan.Load()
   725  	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
   726  }
   727  
   728  // finalStatsAggregate represents various finalizer/cleanup stats obtained
   729  // from the runtime acquired together to avoid skew and inconsistencies.
   730  type finalStatsAggregate struct {
   731  	finalizersQueued   uint64
   732  	finalizersExecuted uint64
   733  	cleanupsQueued     uint64
   734  	cleanupsExecuted   uint64
   735  }
   736  
   737  // compute populates the finalStatsAggregate with values from the runtime.
   738  func (a *finalStatsAggregate) compute() {
   739  	a.finalizersQueued, a.finalizersExecuted = finReadQueueStats()
   740  	a.cleanupsQueued, a.cleanupsExecuted = gcCleanups.readQueueStats()
   741  }
   742  
   743  // nsToSec takes a duration in nanoseconds and converts it to seconds as
   744  // a float64.
   745  func nsToSec(ns int64) float64 {
   746  	return float64(ns) / 1e9
   747  }
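
         // For example, nsToSec(1_500_000_000) == 1.5.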
   748  
   749  // statAggregate is the main driver of the metrics implementation.
   750  //
   751  // It contains multiple aggregates of runtime statistics, as well
   752  // as a set of these aggregates that it has populated. The aggregates
   753  // are populated lazily by its ensure method.
   754  type statAggregate struct {
   755  	ensured    statDepSet
   756  	heapStats  heapStatsAggregate
   757  	sysStats   sysStatsAggregate
   758  	cpuStats   cpuStatsAggregate
   759  	gcStats    gcStatsAggregate
   760  	finalStats finalStatsAggregate
   761  }
   762  
   763  // ensure populates statistics aggregates determined by deps if they
   764  // haven't yet been populated.
   765  func (a *statAggregate) ensure(deps *statDepSet) {
   766  	missing := deps.difference(a.ensured)
   767  	if missing.empty() {
   768  		return
   769  	}
   770  	for i := statDep(0); i < numStatsDeps; i++ {
   771  		if !missing.has(i) {
   772  			continue
   773  		}
   774  		switch i {
   775  		case heapStatsDep:
   776  			a.heapStats.compute()
   777  		case sysStatsDep:
   778  			a.sysStats.compute()
   779  		case cpuStatsDep:
   780  			a.cpuStats.compute()
   781  		case gcStatsDep:
   782  			a.gcStats.compute()
   783  		case finalStatsDep:
   784  			a.finalStats.compute()
   785  		}
   786  	}
   787  	a.ensured = a.ensured.union(missing)
   788  }
   789  
   790  // metricKind is a runtime copy of runtime/metrics.ValueKind and
   791  // must be kept structurally identical to that type.
   792  type metricKind int
   793  
   794  const (
   795  	// These values must be kept identical to their corresponding Kind* values
   796  	// in the runtime/metrics package.
   797  	metricKindBad metricKind = iota
   798  	metricKindUint64
   799  	metricKindFloat64
   800  	metricKindFloat64Histogram
   801  )
   802  
   803  // metricSample is a runtime copy of runtime/metrics.Sample and
   804  // must be kept structurally identical to that type.
   805  type metricSample struct {
   806  	name  string
   807  	value metricValue
   808  }
   809  
    810  // metricValue is a runtime copy of runtime/metrics.Value and
   811  // must be kept structurally identical to that type.
   812  type metricValue struct {
   813  	kind    metricKind
   814  	scalar  uint64         // contains scalar values for scalar Kinds.
   815  	pointer unsafe.Pointer // contains non-scalar values.
   816  }
   817  
   818  // float64HistOrInit tries to pull out an existing float64Histogram
   819  // from the value, but if none exists, then it allocates one with
   820  // the given buckets.
   821  func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
   822  	var hist *metricFloat64Histogram
   823  	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
   824  		hist = (*metricFloat64Histogram)(v.pointer)
   825  	} else {
   826  		v.kind = metricKindFloat64Histogram
   827  		hist = new(metricFloat64Histogram)
   828  		v.pointer = unsafe.Pointer(hist)
   829  	}
   830  	hist.buckets = buckets
   831  	if len(hist.counts) != len(hist.buckets)-1 {
   832  		hist.counts = make([]uint64, len(buckets)-1)
   833  	}
   834  	return hist
   835  }
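
         // Note that when a caller reuses the same sample across calls to
         // readMetrics, a histogram-valued sample keeps its counts slice as
         // long as the bucket layout is unchanged; only the counts themselves
         // are overwritten by the compute functions.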
   836  
   837  // metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
   838  // and must be kept structurally identical to that type.
   839  type metricFloat64Histogram struct {
   840  	counts  []uint64
   841  	buckets []float64
   842  }
   843  
   844  // agg is used by readMetrics, and is protected by metricsSema.
   845  //
   846  // Managed as a global variable because its pointer will be
   847  // an argument to a dynamically-defined function, and we'd
   848  // like to avoid it escaping to the heap.
   849  var agg statAggregate
   850  
   851  type metricName struct {
   852  	name string
   853  	kind metricKind
   854  }
   855  
   856  // readMetricNames is the implementation of runtime/metrics.readMetricNames,
   857  // used by the runtime/metrics test and otherwise unreferenced.
   858  //
   859  //go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
   860  func readMetricNames() []string {
   861  	metricsLock()
   862  	initMetrics()
   863  	n := len(metrics)
   864  	metricsUnlock()
   865  
   866  	list := make([]string, 0, n)
   867  
   868  	metricsLock()
   869  	for name := range metrics {
   870  		list = append(list, name)
   871  	}
   872  	metricsUnlock()
   873  
   874  	return list
   875  }
   876  
   877  // readMetrics is the implementation of runtime/metrics.Read.
   878  //
   879  //go:linkname readMetrics runtime/metrics.runtime_readMetrics
   880  func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
   881  	metricsLock()
   882  
   883  	// Ensure the map is initialized.
   884  	initMetrics()
   885  
   886  	// Read the metrics.
   887  	readMetricsLocked(samplesp, len, cap)
   888  	metricsUnlock()
   889  }
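
         // From the public API side, this is reached via runtime/metrics.Read.
         // A sketch of typical usage of the exported package (not part of this
         // file):
         //
         //	descs := metrics.All()
         //	samples := make([]metrics.Sample, len(descs))
         //	for i := range samples {
         //		samples[i].Name = descs[i].Name
         //	}
         //	metrics.Read(samples)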
   890  
   891  // readMetricsLocked is the internal, locked portion of readMetrics.
   892  //
   893  // Broken out for more robust testing. metricsLock must be held and
   894  // initMetrics must have been called already.
   895  func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int) {
   896  	// Construct a slice from the args.
   897  	sl := slice{samplesp, len, cap}
   898  	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
   899  
   900  	// Clear agg defensively.
   901  	agg = statAggregate{}
   902  
   903  	// Sample.
   904  	for i := range samples {
   905  		sample := &samples[i]
   906  		data, ok := metrics[sample.name]
   907  		if !ok {
   908  			sample.value.kind = metricKindBad
   909  			continue
   910  		}
   911  		// Ensure we have all the stats we need.
   912  		// agg is populated lazily.
   913  		agg.ensure(&data.deps)
   914  
   915  		// Compute the value based on the stats we have.
   916  		data.compute(&agg, &sample.value)
   917  	}
   918  }
   919  
