Imports

"fmt"
"io"
"os"
"runtime"
"runtime/pprof"
"time"
"unicode"
Constants

	const (
		NoGC Flags = 0
		GC   Flags = 1 << iota
	)

Types

	type Flags int

	type Metrics struct {
		gc        Flags
		marks     []*mark
		curMark   *mark
		filebase  string
		pprofFile *os.File
	}

	type mark struct {
		name   string
		startM runtime.MemStats
		endM   runtime.MemStats
		gcM    runtime.MemStats
		startT time.Time
		endT   time.Time
	}

Functions

New creates a new Metrics object. Typical usage should look like:

	func main() {
		filename := "" // Set to enable per-phase pprof file output.
		bench := benchmark.New(benchmark.GC, filename)
		defer bench.Report(os.Stdout)
		// etc
		bench.Start("foo")
		foo()
		bench.Start("bar")
		bar()
	}

Note that a nil Metrics object won't cause any errors, so one could write code like:

	func main() {
		enableBenchmarking := flag.Bool("enable", true, "enables benchmarking")
		flag.Parse()
		var bench *benchmark.Metrics
		if *enableBenchmarking {
			bench = benchmark.New(benchmark.GC, "")
		}
		bench.Start("foo")
		// etc.
	}
	func New(gc Flags, filebase string) *Metrics
Report reports the metrics. It closes the currently Start(ed) range and writes the report to the given io.Writer.

	func (m *Metrics) Report(w io.Writer)
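Because Report takes an io.Writer, the per-phase results do not have to go to os.Stdout. Below is a minimal usage sketch that writes them to a file instead; the filename, phase names, and the extra "log" import are illustrative assumptions, not part of this package.

	func main() {
		// Minimal sketch, assuming the New/Start/Report API shown on this page.
		// The output filename "phase_bench.txt" is hypothetical.
		f, err := os.Create("phase_bench.txt")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		bench := benchmark.New(benchmark.NoGC, "")
		defer bench.Report(f) // closes the open phase and writes all recorded marks

		bench.Start("parse")
		// ... phase work ...
		bench.Start("typecheck")
		// ... phase work ...
	}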
Start marks the beginning of a new measurement phase. Once a metric is started, it continues until either a Report is issued, or another Start is called.

	func (m *Metrics) Start(name string)

	func (m *Metrics) closeMark()
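Relating this to the mark struct above: a phase amounts to snapshotting the wall clock and runtime.MemStats when it starts and again when it is closed, with an extra post-GC snapshot when the GC flag is in effect. The sketch below illustrates that bookkeeping under those assumptions; it is not the package's actual Start/closeMark code.

	// Illustrative only: how a phase mark could be opened and closed using the
	// mark fields listed above. Not the package's actual implementation.
	func openMark(name string) *mark {
		m := &mark{name: name, startT: time.Now()}
		runtime.ReadMemStats(&m.startM)
		return m
	}

	func closeMarkSketch(m *mark, gc Flags) {
		m.endT = time.Now()
		runtime.ReadMemStats(&m.endM)
		if gc&GC != 0 {
			runtime.GC()                 // force a collection so heap numbers are comparable
			runtime.ReadMemStats(&m.gcM) // snapshot taken after the forced GC
		}
	}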
makeBenchString makes a benchmark string consumable by Go's benchmarking tools.

	func makeBenchString(name string) string

	func makePProfFilename(filebase string, name string, typ string) string
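The transformation itself is not spelled out on this page. A plausible sketch follows, assuming the intent is to turn a phase name such as "foo bar" into a Benchmark-style identifier like "BenchmarkFooBar" (spaces dropped, the following letter capitalized); the details are an assumption, not the package's verified implementation.

	// A plausible sketch of makeBenchString's behavior; assumed, not verified.
	func makeBenchStringSketch(name string) string {
		needCap := true
		ret := []rune("Benchmark")
		for _, r := range name {
			if unicode.IsSpace(r) {
				needCap = true
				continue // benchmark names cannot contain spaces
			}
			if needCap {
				r = unicode.ToUpper(r)
				needCap = false
			}
			ret = append(ret, r)
		}
		return string(ret)
	}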
shouldPProf returns true if we should be doing pprof runs.

	func (m *Metrics) shouldPProf() bool
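Given the runtime/pprof import, the filebase argument to New, and the makePProfFilename/shouldPProf helpers, per-phase CPU profiling presumably works by opening a fresh profile file when a phase starts and stopping the profile when it ends. The sketch below shows that pattern; the filename suffix, error handling, and function names are illustrative assumptions.

	// Illustrative only: start a CPU profile for a single phase. The filename
	// format mirrors makePProfFilename's arguments but is an assumption.
	func startPhaseProfile(filebase, name string) (*os.File, error) {
		f, err := os.Create(fmt.Sprintf("%s_%s_%s", filebase, name, "cpuprof"))
		if err != nil {
			return nil, err
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			f.Close()
			return nil, err
		}
		return f, nil
	}

	// Illustrative only: stop profiling and close the file when the phase ends.
	func stopPhaseProfile(f *os.File) {
		pprof.StopCPUProfile()
		f.Close()
	}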