benchmark

Imports

Imports #

"fmt"
"io"
"os"
"runtime"
"runtime/pprof"
"time"
"unicode"

Constants & Variables

GC const #

const GC = 1 << iota

NoGC const #

const NoGC Flags = 0

Type Aliases

Flags type #

type Flags int

Structs

Metrics struct #

type Metrics struct {
    gc        Flags    // flags passed to New; selects GC vs. NoGC behavior
    marks     []*mark  // completed measurement phases
    curMark   *mark    // the phase currently being measured, if any
    filebase  string   // base name for per-phase pprof output files; "" disables pprof output
    pprofFile *os.File // pprof output file for the current phase, when enabled
}

mark struct #

type mark struct {
    name   string           // phase name, as passed to Start
    startM runtime.MemStats // memory statistics at the start of the phase
    endM   runtime.MemStats // memory statistics at the end of the phase
    gcM    runtime.MemStats // memory statistics relating to GC (presumably populated when the GC flag is set)
    startT time.Time        // wall-clock time at the start of the phase
    endT   time.Time        // wall-clock time at the end of the phase
}

Functions

New function #

New creates a new Metrics object. Typical usage should look like:

    func main() {
        filename := "" // Set to enable per-phase pprof file output.
        bench := benchmark.New(benchmark.GC, filename)
        defer bench.Report(os.Stdout)
        // etc
        bench.Start("foo")
        foo()
        bench.Start("bar")
        bar()
    }

Note that a nil Metrics object won't cause any errors, so one could write code like:

    func main() {
        enableBenchmarking := flag.Bool("enable", true, "enables benchmarking")
        flag.Parse()
        var bench *benchmark.Metrics
        if *enableBenchmarking {
            bench = benchmark.New(benchmark.GC, "")
        }
        bench.Start("foo")
        // etc.
    }

func New(gc Flags, filebase string) *Metrics

Report method #

Report closes the currently Start(ed) phase, if any, and writes the collected per-phase metrics to the given io.Writer.

func (m *Metrics) Report(w io.Writer)
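
The report is written in a form Go's benchmarking tools can consume (see makeBenchString below), so it can typically be compared with tools such as benchstat. Purely as an illustration, and not the exact documented format, a line for a phase named "foo" might resemble:

    BenchmarkFoo 1 123456789 ns/op	4567 B/op	89 allocs/op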

Start method #

Start marks the beginning of a new measurement phase. Once a phase is started, it continues until either a Report is issued or another Start is called.

func (m *Metrics) Start(name string)
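
As a sketch of the phase semantics, in the same style as the New example above (fooPass and barPass are hypothetical placeholders):

    func run(bench *benchmark.Metrics) {
        bench.Start("foo pass") // opens the "foo pass" phase
        fooPass()
        bench.Start("bar pass") // closes "foo pass", opens "bar pass"
        barPass()
        bench.Report(os.Stdout) // closes "bar pass" and writes the report
    }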

closeMark method #

func (m *Metrics) closeMark()
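
closeMark itself is undocumented, but the mark fields and the runtime/pprof import suggest roughly what closing a phase involves. A speculative sketch only, not the actual implementation:

    // Speculative outline: record end-of-phase statistics, stop any per-phase
    // CPU profile, and archive the current mark.
    func (m *Metrics) closeMark() {
        if m == nil || m.curMark == nil {
            return
        }
        m.curMark.endT = time.Now()
        runtime.ReadMemStats(&m.curMark.endM)
        if m.gc == GC {
            runtime.GC()                         // assumption: force a collection so heap stats are meaningful
            runtime.ReadMemStats(&m.curMark.gcM) // assumption: this is what gcM records
        }
        if m.shouldPProf() {
            pprof.StopCPUProfile()
            m.pprofFile.Close()
            m.pprofFile = nil
        }
        m.marks = append(m.marks, m.curMark)
        m.curMark = nil
    }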

makeBenchString function #

makeBenchString makes a benchmark string consumable by Go's benchmarking tools.

func makeBenchString(name string) string
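
The conversion is not specified beyond the sentence above, but Go's benchmarking tools expect names of the form BenchmarkXxx, so a plausible sketch (hypothetical, using the package's unicode import) is to prefix "Benchmark", drop spaces, and capitalize the start of each word:

    // Sketch only: turn a phase name like "foo pass" into "BenchmarkFooPass".
    func makeBenchString(name string) string {
        out := []rune("Benchmark")
        capNext := true // capitalize the first rune of every space-separated word
        for _, r := range name {
            if unicode.IsSpace(r) {
                capNext = true
                continue
            }
            if capNext {
                r = unicode.ToUpper(r)
                capNext = false
            }
            out = append(out, r)
        }
        return string(out)
    }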

makePProfFilename function #

func makePProfFilename(filebase string, name string, typ string) string
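
No behavior is documented for this helper; judging by its parameters it presumably combines the filebase given to New, the phase name, and a profile type (e.g. "cpu") into a file path. A hypothetical sketch:

    // Hypothetical: the real naming scheme is not documented here.
    func makePProfFilename(filebase string, name string, typ string) string {
        return fmt.Sprintf("%s_%s_%s", filebase, name, typ)
    }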

shouldPProf method #

shouldPProf returns true if we should be doing pprof runs.

func (m *Metrics) shouldPProf() bool
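
Given that New's filename argument is described as "Set to enable per-phase pprof file output", a plausible sketch (an assumption, not the confirmed implementation):

    // Assumption: pprof runs are wanted exactly when a non-empty filebase was
    // passed to New (and the receiver itself is non-nil).
    func (m *Metrics) shouldPProf() bool {
        return m != nil && m.filebase != ""
    }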
