mirror of
https://github.com/golang/go
synced 2024-11-02 08:01:26 +00:00
cmd/pprof: vendor pprof from github.com/google/pprof
Import the github.com/google/pprof and github.com/ianlancetaylor/demangle packages, without modification. Build the golang version of pprof from cmd/pprof/pprof.go by importing the packages from src/cmd/vendot/github.com/google/pprof The versions upstreamed are: github.com/ianlancetaylor/demangle 4883227f66371e02c4948937d3e2be1664d9be38 github.com/google/pprof 7eb5ba977f28f2ad8dd5f6bb82cc9b454e123cdc Update misc/nacl/testzip.proto for new tests. Change-Id: I076584856491353607a3b98b67d0ca6838be50d6 Reviewed-on: https://go-review.googlesource.com/36798 Run-TryBot: Russ Cox <rsc@golang.org> Reviewed-by: Russ Cox <rsc@golang.org>
This commit is contained in:
parent
2818cb5c9e
commit
7844ef427a
159 changed files with 31584 additions and 4642 deletions
|
@ -37,6 +37,26 @@ go src=..
|
|||
testdata
|
||||
+
|
||||
vendor
|
||||
github.com
|
||||
google
|
||||
pprof
|
||||
internal
|
||||
driver
|
||||
testdata
|
||||
+
|
||||
graph
|
||||
testdata
|
||||
+
|
||||
report
|
||||
testdata
|
||||
+
|
||||
profile
|
||||
testdata
|
||||
+
|
||||
ianlancetaylor
|
||||
demangle
|
||||
testdata
|
||||
+
|
||||
golang.org
|
||||
x
|
||||
arch
|
||||
|
|
|
@ -1,235 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package commands defines and manages the basic pprof commands
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cmd/internal/browser"
|
||||
"cmd/pprof/internal/plugin"
|
||||
"cmd/pprof/internal/report"
|
||||
"cmd/pprof/internal/svg"
|
||||
"cmd/pprof/internal/tempfile"
|
||||
)
|
||||
|
||||
// Commands describes the commands accepted by pprof.
|
||||
type Commands map[string]*Command
|
||||
|
||||
// Command describes the actions for a pprof command. Includes a
|
||||
// function for command-line completion, the report format to use
|
||||
// during report generation, any postprocessing functions, and whether
|
||||
// the command expects a regexp parameter (typically a function name).
|
||||
type Command struct {
|
||||
Complete Completer // autocomplete for interactive mode
|
||||
Format int // report format to generate
|
||||
PostProcess PostProcessor // postprocessing to run on report
|
||||
HasParam bool // Collect a parameter from the CLI
|
||||
Usage string // Help text
|
||||
}
|
||||
|
||||
// Completer is a function for command-line autocompletion
|
||||
type Completer func(prefix string) string
|
||||
|
||||
// PostProcessor is a function that applies post-processing to the report output
|
||||
type PostProcessor func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error
|
||||
|
||||
// PProf returns the basic pprof report-generation commands
|
||||
func PProf(c Completer, interactive **bool) Commands {
|
||||
return Commands{
|
||||
// Commands that require no post-processing.
|
||||
"tags": {nil, report.Tags, nil, false, "Outputs all tags in the profile"},
|
||||
"raw": {c, report.Raw, nil, false, "Outputs a text representation of the raw profile"},
|
||||
"dot": {c, report.Dot, nil, false, "Outputs a graph in DOT format"},
|
||||
"top": {c, report.Text, nil, false, "Outputs top entries in text form"},
|
||||
"tree": {c, report.Tree, nil, false, "Outputs a text rendering of call graph"},
|
||||
"text": {c, report.Text, nil, false, "Outputs top entries in text form"},
|
||||
"disasm": {c, report.Dis, nil, true, "Output annotated assembly for functions matching regexp or address"},
|
||||
"list": {c, report.List, nil, true, "Output annotated source for functions matching regexp"},
|
||||
"peek": {c, report.Tree, nil, true, "Output callers/callees of functions matching regexp"},
|
||||
|
||||
// Save binary formats to a file
|
||||
"callgrind": {c, report.Callgrind, awayFromTTY(interactive, "callgraph.out"), false, "Outputs a graph in callgrind format"},
|
||||
"proto": {c, report.Proto, awayFromTTY(interactive, "pb.gz"), false, "Outputs the profile in compressed protobuf format"},
|
||||
|
||||
// Generate report in DOT format and postprocess with dot
|
||||
"gif": {c, report.Dot, invokeDot(interactive, "gif"), false, "Outputs a graph image in GIF format"},
|
||||
"pdf": {c, report.Dot, invokeDot(interactive, "pdf"), false, "Outputs a graph in PDF format"},
|
||||
"png": {c, report.Dot, invokeDot(interactive, "png"), false, "Outputs a graph image in PNG format"},
|
||||
"ps": {c, report.Dot, invokeDot(interactive, "ps"), false, "Outputs a graph in PS format"},
|
||||
|
||||
// Save SVG output into a file after including svgpan library
|
||||
"svg": {c, report.Dot, saveSVGToFile(interactive), false, "Outputs a graph in SVG format"},
|
||||
|
||||
// Visualize postprocessed dot output
|
||||
"eog": {c, report.Dot, invokeVisualizer(interactive, invokeDot(nil, "svg"), "svg", []string{"eog"}), false, "Visualize graph through eog"},
|
||||
"evince": {c, report.Dot, invokeVisualizer(interactive, invokeDot(nil, "pdf"), "pdf", []string{"evince"}), false, "Visualize graph through evince"},
|
||||
"gv": {c, report.Dot, invokeVisualizer(interactive, invokeDot(nil, "ps"), "ps", []string{"gv --noantialias"}), false, "Visualize graph through gv"},
|
||||
"web": {c, report.Dot, invokeVisualizer(interactive, saveSVGToFile(nil), "svg", browsers()), false, "Visualize graph through web browser"},
|
||||
|
||||
// Visualize HTML directly generated by report.
|
||||
"weblist": {c, report.WebList, invokeVisualizer(interactive, awayFromTTY(nil, "html"), "html", browsers()), true, "Output annotated source in HTML for functions matching regexp or address"},
|
||||
}
|
||||
}
|
||||
|
||||
// browsers returns a list of commands to attempt for web visualization
|
||||
// on the current platform
|
||||
func browsers() []string {
|
||||
var cmds []string
|
||||
for _, cmd := range browser.Commands() {
|
||||
cmds = append(cmds, strings.Join(cmd, " "))
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
// NewCompleter creates an autocompletion function for a set of commands.
|
||||
func NewCompleter(cs Commands) Completer {
|
||||
return func(line string) string {
|
||||
switch tokens := strings.Fields(line); len(tokens) {
|
||||
case 0:
|
||||
// Nothing to complete
|
||||
case 1:
|
||||
// Single token -- complete command name
|
||||
found := ""
|
||||
for c := range cs {
|
||||
if strings.HasPrefix(c, tokens[0]) {
|
||||
if found != "" {
|
||||
return line
|
||||
}
|
||||
found = c
|
||||
}
|
||||
}
|
||||
if found != "" {
|
||||
return found
|
||||
}
|
||||
default:
|
||||
// Multiple tokens -- complete using command completer
|
||||
if c, ok := cs[tokens[0]]; ok {
|
||||
if c.Complete != nil {
|
||||
lastTokenIdx := len(tokens) - 1
|
||||
lastToken := tokens[lastTokenIdx]
|
||||
if strings.HasPrefix(lastToken, "-") {
|
||||
lastToken = "-" + c.Complete(lastToken[1:])
|
||||
} else {
|
||||
lastToken = c.Complete(lastToken)
|
||||
}
|
||||
return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ")
|
||||
}
|
||||
}
|
||||
}
|
||||
return line
|
||||
}
|
||||
}
|
||||
|
||||
// awayFromTTY saves the output in a file if it would otherwise go to
|
||||
// the terminal screen. This is used to avoid dumping binary data on
|
||||
// the screen.
|
||||
func awayFromTTY(interactive **bool, format string) PostProcessor {
|
||||
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
|
||||
if output == os.Stdout && (ui.IsTerminal() || interactive != nil && **interactive) {
|
||||
tempFile, err := tempfile.New("", "profile", "."+format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ui.PrintErr("Generating report in ", tempFile.Name())
|
||||
_, err = fmt.Fprint(tempFile, input)
|
||||
return err
|
||||
}
|
||||
_, err := fmt.Fprint(output, input)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func invokeDot(interactive **bool, format string) PostProcessor {
|
||||
divert := awayFromTTY(interactive, format)
|
||||
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
|
||||
if _, err := exec.LookPath("dot"); err != nil {
|
||||
ui.PrintErr("Cannot find dot, have you installed Graphviz?")
|
||||
return err
|
||||
}
|
||||
cmd := exec.Command("dot", "-T"+format)
|
||||
var buf bytes.Buffer
|
||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, &buf, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return divert(&buf, output, ui)
|
||||
}
|
||||
}
|
||||
|
||||
func saveSVGToFile(interactive **bool) PostProcessor {
|
||||
generateSVG := invokeDot(nil, "svg")
|
||||
divert := awayFromTTY(interactive, "svg")
|
||||
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
|
||||
baseSVG := &bytes.Buffer{}
|
||||
generateSVG(input, baseSVG, ui)
|
||||
massaged := &bytes.Buffer{}
|
||||
fmt.Fprint(massaged, svg.Massage(*baseSVG))
|
||||
return divert(massaged, output, ui)
|
||||
}
|
||||
}
|
||||
|
||||
var vizTmpDir string
|
||||
|
||||
func makeVizTmpDir() error {
|
||||
if vizTmpDir != "" {
|
||||
return nil
|
||||
}
|
||||
name, err := ioutil.TempDir("", "pprof-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tempfile.DeferDelete(name)
|
||||
vizTmpDir = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func invokeVisualizer(interactive **bool, format PostProcessor, suffix string, visualizers []string) PostProcessor {
|
||||
return func(input *bytes.Buffer, output io.Writer, ui plugin.UI) error {
|
||||
if err := makeVizTmpDir(); err != nil {
|
||||
return err
|
||||
}
|
||||
tempFile, err := tempfile.New(vizTmpDir, "pprof", "."+suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tempfile.DeferDelete(tempFile.Name())
|
||||
if err = format(input, tempFile, ui); err != nil {
|
||||
return err
|
||||
}
|
||||
tempFile.Close() // on windows, if the file is Open, start cannot access it.
|
||||
// Try visualizers until one is successful
|
||||
for _, v := range visualizers {
|
||||
// Separate command and arguments for exec.Command.
|
||||
args := strings.Split(v, " ")
|
||||
if len(args) == 0 {
|
||||
continue
|
||||
}
|
||||
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
|
||||
viewer.Stderr = os.Stderr
|
||||
if err = viewer.Start(); err == nil {
|
||||
// The viewer might just send a message to another program
|
||||
// to open the file. Give that program a little time to open the
|
||||
// file before we remove it.
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
if !**interactive {
|
||||
// In command-line mode, wait for the viewer to be closed
|
||||
// before proceeding
|
||||
return viewer.Wait()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load diff
|
@ -1,492 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"cmd/pprof/internal/commands"
|
||||
"cmd/pprof/internal/plugin"
|
||||
"internal/pprof/profile"
|
||||
)
|
||||
|
||||
var profileFunctionNames = []string{}
|
||||
|
||||
// functionCompleter replaces provided substring with a function
|
||||
// name retrieved from a profile if a single match exists. Otherwise,
|
||||
// it returns unchanged substring. It defaults to no-op if the profile
|
||||
// is not specified.
|
||||
func functionCompleter(substring string) string {
|
||||
found := ""
|
||||
for _, fName := range profileFunctionNames {
|
||||
if strings.Contains(fName, substring) {
|
||||
if found != "" {
|
||||
return substring
|
||||
}
|
||||
found = fName
|
||||
}
|
||||
}
|
||||
if found != "" {
|
||||
return found
|
||||
}
|
||||
return substring
|
||||
}
|
||||
|
||||
// updateAutoComplete enhances autocompletion with information that can be
|
||||
// retrieved from the profile
|
||||
func updateAutoComplete(p *profile.Profile) {
|
||||
profileFunctionNames = nil // remove function names retrieved previously
|
||||
for _, fn := range p.Function {
|
||||
profileFunctionNames = append(profileFunctionNames, fn.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// splitCommand splits the command line input into tokens separated by
|
||||
// spaces. Takes care to separate commands of the form 'top10' into
|
||||
// two tokens: 'top' and '10'
|
||||
func splitCommand(input string) []string {
|
||||
fields := strings.Fields(input)
|
||||
if num := strings.IndexAny(fields[0], "0123456789"); num != -1 {
|
||||
inputNumber := fields[0][num:]
|
||||
fields[0] = fields[0][:num]
|
||||
fields = append([]string{fields[0], inputNumber}, fields[1:]...)
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// interactive displays a prompt and reads commands for profile
|
||||
// manipulation/visualization.
|
||||
func interactive(p *profile.Profile, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
|
||||
updateAutoComplete(p)
|
||||
|
||||
// Enter command processing loop.
|
||||
ui.Print("Entering interactive mode (type \"help\" for commands)")
|
||||
ui.SetAutoComplete(commands.NewCompleter(f.commands))
|
||||
|
||||
for {
|
||||
input, err := readCommand(p, ui, f)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
if input == "" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Process simple commands.
|
||||
switch input {
|
||||
case "":
|
||||
continue
|
||||
case ":":
|
||||
f.flagFocus = newString("")
|
||||
f.flagIgnore = newString("")
|
||||
f.flagTagFocus = newString("")
|
||||
f.flagTagIgnore = newString("")
|
||||
f.flagHide = newString("")
|
||||
continue
|
||||
}
|
||||
|
||||
fields := splitCommand(input)
|
||||
// Process report generation commands.
|
||||
if _, ok := f.commands[fields[0]]; ok {
|
||||
if err := generateReport(p, fields, obj, ui, f); err != nil {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
ui.PrintErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch cmd := fields[0]; cmd {
|
||||
case "help":
|
||||
commandHelp(fields, ui, f)
|
||||
continue
|
||||
case "exit", "quit":
|
||||
return nil
|
||||
}
|
||||
|
||||
// Process option settings.
|
||||
if of, err := optFlags(p, input, f); err == nil {
|
||||
f = of
|
||||
} else {
|
||||
ui.PrintErr("Error: ", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func generateReport(p *profile.Profile, cmd []string, obj plugin.ObjTool, ui plugin.UI, f *flags) error {
|
||||
prof := p.Copy()
|
||||
|
||||
cf, err := cmdFlags(prof, cmd, ui, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return generate(true, prof, obj, ui, cf)
|
||||
}
|
||||
|
||||
// validateRegex checks if a string is a valid regular expression.
|
||||
func validateRegex(v string) error {
|
||||
_, err := regexp.Compile(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// readCommand prompts for and reads the next command.
|
||||
func readCommand(p *profile.Profile, ui plugin.UI, f *flags) (string, error) {
|
||||
//ui.Print("Options:\n", f.String(p))
|
||||
s, err := ui.ReadLine()
|
||||
return strings.TrimSpace(s), err
|
||||
}
|
||||
|
||||
func commandHelp(_ []string, ui plugin.UI, f *flags) error {
|
||||
help := `
|
||||
Commands:
|
||||
cmd [n] [--cum] [focus_regex]* [-ignore_regex]*
|
||||
Produce a text report with the top n entries.
|
||||
Include samples matching focus_regex, and exclude ignore_regex.
|
||||
Add --cum to sort using cumulative data.
|
||||
Available commands:
|
||||
`
|
||||
var commands []string
|
||||
for name, cmd := range f.commands {
|
||||
commands = append(commands, fmt.Sprintf(" %-12s %s", name, cmd.Usage))
|
||||
}
|
||||
sort.Strings(commands)
|
||||
|
||||
help = help + strings.Join(commands, "\n") + `
|
||||
peek func_regex
|
||||
Display callers and callees of functions matching func_regex.
|
||||
|
||||
dot [n] [focus_regex]* [-ignore_regex]* [>file]
|
||||
Produce an annotated callgraph with the top n entries.
|
||||
Include samples matching focus_regex, and exclude ignore_regex.
|
||||
For other outputs, replace dot with:
|
||||
- Graphic formats: dot, svg, pdf, ps, gif, png (use > to name output file)
|
||||
- Graph viewer: gv, web, evince, eog
|
||||
|
||||
callgrind [n] [focus_regex]* [-ignore_regex]* [>file]
|
||||
Produce a file in callgrind-compatible format.
|
||||
Include samples matching focus_regex, and exclude ignore_regex.
|
||||
|
||||
weblist func_regex [-ignore_regex]*
|
||||
Show annotated source with interspersed assembly in a web browser.
|
||||
|
||||
list func_regex [-ignore_regex]*
|
||||
Print source for routines matching func_regex, and exclude ignore_regex.
|
||||
|
||||
disasm func_regex [-ignore_regex]*
|
||||
Disassemble routines matching func_regex, and exclude ignore_regex.
|
||||
|
||||
tags tag_regex [-ignore_regex]*
|
||||
List tags with key:value matching tag_regex and exclude ignore_regex.
|
||||
|
||||
quit/exit/^D
|
||||
Exit pprof.
|
||||
|
||||
option=value
|
||||
The following options can be set individually:
|
||||
cum/flat: Sort entries based on cumulative or flat data
|
||||
call_tree: Build context-sensitive call trees
|
||||
nodecount: Max number of entries to display
|
||||
nodefraction: Min frequency ratio of nodes to display
|
||||
edgefraction: Min frequency ratio of edges to display
|
||||
focus/ignore: Regexp to include/exclude samples by name/file
|
||||
tagfocus/tagignore: Regexp or value range to filter samples by tag
|
||||
eg "1mb", "1mb:2mb", ":64kb"
|
||||
|
||||
functions: Level of aggregation for sample data
|
||||
files:
|
||||
lines:
|
||||
addresses:
|
||||
|
||||
unit: Measurement unit to use on reports
|
||||
|
||||
Sample value selection by index:
|
||||
sample_index: Index of sample value to display
|
||||
mean: Average sample value over first value
|
||||
|
||||
Sample value selection by name:
|
||||
alloc_space for heap profiles
|
||||
alloc_objects
|
||||
inuse_space
|
||||
inuse_objects
|
||||
|
||||
total_delay for contention profiles
|
||||
mean_delay
|
||||
contentions
|
||||
|
||||
: Clear focus/ignore/hide/tagfocus/tagignore`
|
||||
|
||||
ui.Print(help)
|
||||
return nil
|
||||
}
|
||||
|
||||
// cmdFlags parses the options of an interactive command and returns
|
||||
// an updated flags object.
|
||||
func cmdFlags(prof *profile.Profile, input []string, ui plugin.UI, f *flags) (*flags, error) {
|
||||
cf := *f
|
||||
|
||||
var focus, ignore string
|
||||
output := *cf.flagOutput
|
||||
nodeCount := *cf.flagNodeCount
|
||||
cmd := input[0]
|
||||
|
||||
// Update output flags based on parameters.
|
||||
tokens := input[1:]
|
||||
for p := 0; p < len(tokens); p++ {
|
||||
t := tokens[p]
|
||||
if t == "" {
|
||||
continue
|
||||
}
|
||||
if c, err := strconv.ParseInt(t, 10, 32); err == nil {
|
||||
nodeCount = int(c)
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case '>':
|
||||
if len(t) > 1 {
|
||||
output = t[1:]
|
||||
continue
|
||||
}
|
||||
// find next token
|
||||
for p++; p < len(tokens); p++ {
|
||||
if tokens[p] != "" {
|
||||
output = tokens[p]
|
||||
break
|
||||
}
|
||||
}
|
||||
case '-':
|
||||
if t == "--cum" || t == "-cum" {
|
||||
cf.flagCum = newBool(true)
|
||||
continue
|
||||
}
|
||||
ignore = catRegex(ignore, t[1:])
|
||||
default:
|
||||
focus = catRegex(focus, t)
|
||||
}
|
||||
}
|
||||
|
||||
pcmd, ok := f.commands[cmd]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Unexpected parse failure: %v", input)
|
||||
}
|
||||
// Reset flags
|
||||
cf.flagCommands = make(map[string]*bool)
|
||||
cf.flagParamCommands = make(map[string]*string)
|
||||
|
||||
if !pcmd.HasParam {
|
||||
cf.flagCommands[cmd] = newBool(true)
|
||||
|
||||
switch cmd {
|
||||
case "tags":
|
||||
cf.flagTagFocus = newString(focus)
|
||||
cf.flagTagIgnore = newString(ignore)
|
||||
default:
|
||||
cf.flagFocus = newString(catRegex(*cf.flagFocus, focus))
|
||||
cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
|
||||
}
|
||||
} else {
|
||||
if focus == "" {
|
||||
focus = "."
|
||||
}
|
||||
cf.flagParamCommands[cmd] = newString(focus)
|
||||
cf.flagIgnore = newString(catRegex(*cf.flagIgnore, ignore))
|
||||
}
|
||||
|
||||
if nodeCount < 0 {
|
||||
switch cmd {
|
||||
case "text", "top":
|
||||
// Default text/top to 10 nodes on interactive mode
|
||||
nodeCount = 10
|
||||
default:
|
||||
nodeCount = 80
|
||||
}
|
||||
}
|
||||
|
||||
cf.flagNodeCount = newInt(nodeCount)
|
||||
cf.flagOutput = newString(output)
|
||||
|
||||
// Do regular flags processing
|
||||
if err := processFlags(prof, ui, &cf); err != nil {
|
||||
cf.usage(ui)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cf, nil
|
||||
}
|
||||
|
||||
func catRegex(a, b string) string {
|
||||
if a == "" {
|
||||
return b
|
||||
}
|
||||
if b == "" {
|
||||
return a
|
||||
}
|
||||
return a + "|" + b
|
||||
}
|
||||
|
||||
// optFlags parses an interactive option setting and returns
|
||||
// an updated flags object.
|
||||
func optFlags(p *profile.Profile, input string, f *flags) (*flags, error) {
|
||||
inputs := strings.SplitN(input, "=", 2)
|
||||
option := strings.ToLower(strings.TrimSpace(inputs[0]))
|
||||
var value string
|
||||
if len(inputs) == 2 {
|
||||
value = strings.TrimSpace(inputs[1])
|
||||
}
|
||||
|
||||
of := *f
|
||||
|
||||
var err error
|
||||
var bv bool
|
||||
var uv uint64
|
||||
var fv float64
|
||||
|
||||
switch option {
|
||||
case "cum":
|
||||
if bv, err = parseBool(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagCum = newBool(bv)
|
||||
case "flat":
|
||||
if bv, err = parseBool(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagCum = newBool(!bv)
|
||||
case "call_tree":
|
||||
if bv, err = parseBool(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagCallTree = newBool(bv)
|
||||
case "unit":
|
||||
of.flagDisplayUnit = newString(value)
|
||||
case "sample_index":
|
||||
if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ix := int(uv); ix < 0 || ix >= len(p.SampleType) {
|
||||
return nil, fmt.Errorf("sample_index out of range [0..%d]", len(p.SampleType)-1)
|
||||
}
|
||||
of.flagSampleIndex = newInt(int(uv))
|
||||
case "mean":
|
||||
if bv, err = parseBool(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagMean = newBool(bv)
|
||||
case "nodecount":
|
||||
if uv, err = strconv.ParseUint(value, 10, 32); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagNodeCount = newInt(int(uv))
|
||||
case "nodefraction":
|
||||
if fv, err = strconv.ParseFloat(value, 64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagNodeFraction = newFloat64(fv)
|
||||
case "edgefraction":
|
||||
if fv, err = strconv.ParseFloat(value, 64); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagEdgeFraction = newFloat64(fv)
|
||||
case "focus":
|
||||
if err = validateRegex(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagFocus = newString(value)
|
||||
case "ignore":
|
||||
if err = validateRegex(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagIgnore = newString(value)
|
||||
case "tagfocus":
|
||||
if err = validateRegex(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagTagFocus = newString(value)
|
||||
case "tagignore":
|
||||
if err = validateRegex(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagTagIgnore = newString(value)
|
||||
case "hide":
|
||||
if err = validateRegex(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
of.flagHide = newString(value)
|
||||
case "addresses", "files", "lines", "functions":
|
||||
if bv, err = parseBool(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !bv {
|
||||
return nil, fmt.Errorf("select one of addresses/files/lines/functions")
|
||||
}
|
||||
setGranularityToggle(option, &of)
|
||||
default:
|
||||
if ix := findSampleIndex(p, "", option); ix >= 0 {
|
||||
of.flagSampleIndex = newInt(ix)
|
||||
} else if ix := findSampleIndex(p, "total_", option); ix >= 0 {
|
||||
of.flagSampleIndex = newInt(ix)
|
||||
of.flagMean = newBool(false)
|
||||
} else if ix := findSampleIndex(p, "mean_", option); ix >= 1 {
|
||||
of.flagSampleIndex = newInt(ix)
|
||||
of.flagMean = newBool(true)
|
||||
} else {
|
||||
return nil, fmt.Errorf("unrecognized command: %s", input)
|
||||
}
|
||||
}
|
||||
return &of, nil
|
||||
}
|
||||
|
||||
// parseBool parses a string as a boolean value.
|
||||
func parseBool(v string) (bool, error) {
|
||||
switch strings.ToLower(v) {
|
||||
case "true", "t", "yes", "y", "1", "":
|
||||
return true, nil
|
||||
case "false", "f", "no", "n", "0":
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf(`illegal input "%s" for bool value`, v)
|
||||
}
|
||||
|
||||
func findSampleIndex(p *profile.Profile, prefix, sampleType string) int {
|
||||
if !strings.HasPrefix(sampleType, prefix) {
|
||||
return -1
|
||||
}
|
||||
sampleType = strings.TrimPrefix(sampleType, prefix)
|
||||
for i, r := range p.SampleType {
|
||||
if r.Type == sampleType {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// setGranularityToggle manages the set of granularity options. These
|
||||
// operate as a toggle; turning one on turns the others off.
|
||||
func setGranularityToggle(o string, fl *flags) {
|
||||
t, f := newBool(true), newBool(false)
|
||||
fl.flagFunctions = f
|
||||
fl.flagFiles = f
|
||||
fl.flagLines = f
|
||||
fl.flagAddresses = f
|
||||
switch o {
|
||||
case "functions":
|
||||
fl.flagFunctions = t
|
||||
case "files":
|
||||
fl.flagFiles = t
|
||||
case "lines":
|
||||
fl.flagLines = t
|
||||
case "addresses":
|
||||
fl.flagAddresses = t
|
||||
default:
|
||||
panic(fmt.Errorf("unexpected option %s", o))
|
||||
}
|
||||
}
|
|
@ -1,110 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fetch provides an extensible mechanism to fetch a profile
|
||||
// from a data source.
|
||||
package fetch
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cmd/pprof/internal/plugin"
|
||||
"internal/pprof/profile"
|
||||
)
|
||||
|
||||
// FetchProfile reads from a data source (network, file) and generates a
|
||||
// profile.
|
||||
func FetchProfile(source string, timeout time.Duration) (*profile.Profile, error) {
|
||||
return Fetcher(source, timeout, plugin.StandardUI())
|
||||
}
|
||||
|
||||
// Fetcher is the plugin.Fetcher version of FetchProfile.
|
||||
func Fetcher(source string, timeout time.Duration, ui plugin.UI) (*profile.Profile, error) {
|
||||
var f io.ReadCloser
|
||||
var err error
|
||||
|
||||
url, err := url.Parse(source)
|
||||
if err == nil && url.Host != "" {
|
||||
f, err = FetchURL(source, timeout)
|
||||
} else {
|
||||
f, err = os.Open(source)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return profile.Parse(f)
|
||||
}
|
||||
|
||||
// FetchURL fetches a profile from a URL using HTTP.
|
||||
func FetchURL(source string, timeout time.Duration) (io.ReadCloser, error) {
|
||||
resp, err := httpGet(source, timeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http fetch: %v", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
defer resp.Body.Close()
|
||||
return nil, statusCodeError(resp)
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// PostURL issues a POST to a URL over HTTP.
|
||||
func PostURL(source, post string) ([]byte, error) {
|
||||
resp, err := http.Post(source, "application/octet-stream", strings.NewReader(post))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http post %s: %v", source, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, statusCodeError(resp)
|
||||
}
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
|
||||
func statusCodeError(resp *http.Response) error {
|
||||
if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") {
|
||||
// error is from pprof endpoint
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
return fmt.Errorf("server response: %s - %s", resp.Status, body)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("server response: %s", resp.Status)
|
||||
}
|
||||
|
||||
// httpGet is a wrapper around http.Get; it is defined as a variable
|
||||
// so it can be redefined during for testing.
|
||||
var httpGet = func(source string, timeout time.Duration) (*http.Response, error) {
|
||||
url, err := url.Parse(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tlsConfig *tls.Config
|
||||
if url.Scheme == "https+insecure" {
|
||||
tlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
url.Scheme = "https"
|
||||
source = url.String()
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
ResponseHeaderTimeout: timeout + 5*time.Second,
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
return client.Get(source)
|
||||
}
|
File diff suppressed because it is too large
Load diff
|
@ -1,454 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package report
|
||||
|
||||
// This file contains routines related to the generation of annotated
|
||||
// source listings.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"cmd/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
// printSource prints an annotated source listing, include all
|
||||
// functions with samples that match the regexp rpt.options.symbol.
|
||||
// The sources are sorted by function name and then by filename to
|
||||
// eliminate potential nondeterminism.
|
||||
func printSource(w io.Writer, rpt *Report) error {
|
||||
o := rpt.options
|
||||
g, err := newGraph(rpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Identify all the functions that match the regexp provided.
|
||||
// Group nodes for each matching function.
|
||||
var functions nodes
|
||||
functionNodes := make(map[string]nodes)
|
||||
for _, n := range g.ns {
|
||||
if !o.Symbol.MatchString(n.info.name) {
|
||||
continue
|
||||
}
|
||||
if functionNodes[n.info.name] == nil {
|
||||
functions = append(functions, n)
|
||||
}
|
||||
functionNodes[n.info.name] = append(functionNodes[n.info.name], n)
|
||||
}
|
||||
functions.sort(nameOrder)
|
||||
|
||||
fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total))
|
||||
for _, fn := range functions {
|
||||
name := fn.info.name
|
||||
|
||||
// Identify all the source files associated to this function.
|
||||
// Group nodes for each source file.
|
||||
var sourceFiles nodes
|
||||
fileNodes := make(map[string]nodes)
|
||||
for _, n := range functionNodes[name] {
|
||||
if n.info.file == "" {
|
||||
continue
|
||||
}
|
||||
if fileNodes[n.info.file] == nil {
|
||||
sourceFiles = append(sourceFiles, n)
|
||||
}
|
||||
fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
|
||||
}
|
||||
|
||||
if len(sourceFiles) == 0 {
|
||||
fmt.Printf("No source information for %s\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
sourceFiles.sort(fileOrder)
|
||||
|
||||
// Print each file associated with this function.
|
||||
for _, fl := range sourceFiles {
|
||||
filename := fl.info.file
|
||||
fns := fileNodes[filename]
|
||||
flatSum, cumSum := sumNodes(fns)
|
||||
|
||||
fnodes, path, err := getFunctionSource(name, filename, fns, 0, 0)
|
||||
fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, path)
|
||||
fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
|
||||
rpt.formatValue(flatSum), rpt.formatValue(cumSum),
|
||||
percentage(cumSum, rpt.total))
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(w, " Error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, fn := range fnodes {
|
||||
fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt), fn.info.lineno, fn.info.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// printWebSource prints an annotated source listing, include all
|
||||
// functions with samples that match the regexp rpt.options.symbol.
|
||||
func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
|
||||
o := rpt.options
|
||||
g, err := newGraph(rpt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the regexp source can be parsed as an address, also match
|
||||
// functions that land on that address.
|
||||
var address *uint64
|
||||
if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
|
||||
address = &hex
|
||||
}
|
||||
|
||||
// Extract interesting symbols from binary files in the profile and
|
||||
// classify samples per symbol.
|
||||
symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj)
|
||||
symNodes := nodesPerSymbol(g.ns, symbols)
|
||||
|
||||
// Sort symbols for printing.
|
||||
var syms objSymbols
|
||||
for s := range symNodes {
|
||||
syms = append(syms, s)
|
||||
}
|
||||
sort.Sort(syms)
|
||||
|
||||
if len(syms) == 0 {
|
||||
return fmt.Errorf("no samples found on routines matching: %s", o.Symbol.String())
|
||||
}
|
||||
|
||||
printHeader(w, rpt)
|
||||
for _, s := range syms {
|
||||
name := s.sym.Name[0]
|
||||
// Identify sources associated to a symbol by examining
|
||||
// symbol samples. Classify samples per source file.
|
||||
var sourceFiles nodes
|
||||
fileNodes := make(map[string]nodes)
|
||||
for _, n := range symNodes[s] {
|
||||
if n.info.file == "" {
|
||||
continue
|
||||
}
|
||||
if fileNodes[n.info.file] == nil {
|
||||
sourceFiles = append(sourceFiles, n)
|
||||
}
|
||||
fileNodes[n.info.file] = append(fileNodes[n.info.file], n)
|
||||
}
|
||||
|
||||
if len(sourceFiles) == 0 {
|
||||
fmt.Printf("No source information for %s\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
sourceFiles.sort(fileOrder)
|
||||
|
||||
// Print each file associated with this function.
|
||||
for _, fl := range sourceFiles {
|
||||
filename := fl.info.file
|
||||
fns := fileNodes[filename]
|
||||
|
||||
asm := assemblyPerSourceLine(symbols, fns, filename, obj)
|
||||
start, end := sourceCoordinates(asm)
|
||||
|
||||
fnodes, path, err := getFunctionSource(name, filename, fns, start, end)
|
||||
if err != nil {
|
||||
fnodes, path = getMissingFunctionSource(filename, asm, start, end)
|
||||
}
|
||||
|
||||
flatSum, cumSum := sumNodes(fnodes)
|
||||
printFunctionHeader(w, name, path, flatSum, cumSum, rpt)
|
||||
for _, fn := range fnodes {
|
||||
printFunctionSourceLine(w, fn, asm[fn.info.lineno], rpt)
|
||||
}
|
||||
printFunctionClosing(w)
|
||||
}
|
||||
}
|
||||
printPageClosing(w)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourceCoordinates returns the lowest and highest line numbers from
|
||||
// a set of assembly statements.
|
||||
func sourceCoordinates(asm map[int]nodes) (start, end int) {
|
||||
for l := range asm {
|
||||
if start == 0 || l < start {
|
||||
start = l
|
||||
}
|
||||
if end == 0 || l > end {
|
||||
end = l
|
||||
}
|
||||
}
|
||||
return start, end
|
||||
}
|
||||
|
||||
// assemblyPerSourceLine disassembles the binary containing a symbol
|
||||
// and classifies the assembly instructions according to its
|
||||
// corresponding source line, annotating them with a set of samples.
|
||||
func assemblyPerSourceLine(objSyms []*objSymbol, rs nodes, src string, obj plugin.ObjTool) map[int]nodes {
|
||||
assembly := make(map[int]nodes)
|
||||
// Identify symbol to use for this collection of samples.
|
||||
o := findMatchingSymbol(objSyms, rs)
|
||||
if o == nil {
|
||||
return assembly
|
||||
}
|
||||
|
||||
// Extract assembly for matched symbol
|
||||
insns, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End)
|
||||
if err != nil {
|
||||
return assembly
|
||||
}
|
||||
|
||||
srcBase := filepath.Base(src)
|
||||
anodes := annotateAssembly(insns, rs, o.base)
|
||||
var lineno = 0
|
||||
for _, an := range anodes {
|
||||
if filepath.Base(an.info.file) == srcBase {
|
||||
lineno = an.info.lineno
|
||||
}
|
||||
if lineno != 0 {
|
||||
assembly[lineno] = append(assembly[lineno], an)
|
||||
}
|
||||
}
|
||||
|
||||
return assembly
|
||||
}
|
||||
|
||||
// findMatchingSymbol looks for the symbol that corresponds to a set
|
||||
// of samples, by comparing their addresses.
|
||||
func findMatchingSymbol(objSyms []*objSymbol, ns nodes) *objSymbol {
|
||||
for _, n := range ns {
|
||||
for _, o := range objSyms {
|
||||
if o.sym.File == n.info.objfile &&
|
||||
o.sym.Start <= n.info.address-o.base &&
|
||||
n.info.address-o.base <= o.sym.End {
|
||||
return o
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// printHeader prints the page header for a weblist report.
|
||||
func printHeader(w io.Writer, rpt *Report) {
|
||||
fmt.Fprintln(w, weblistPageHeader)
|
||||
|
||||
var labels []string
|
||||
for _, l := range legendLabels(rpt) {
|
||||
labels = append(labels, template.HTMLEscapeString(l))
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, `<div class="legend">%s<br>Total: %s</div>`,
|
||||
strings.Join(labels, "<br>\n"),
|
||||
rpt.formatValue(rpt.total),
|
||||
)
|
||||
}
|
||||
|
||||
// printFunctionHeader prints a function header for a weblist report.
|
||||
func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) {
|
||||
fmt.Fprintf(w, `<h1>%s</h1>%s
|
||||
<pre onClick="pprof_toggle_asm(event)">
|
||||
Total: %10s %10s (flat, cum) %s
|
||||
`,
|
||||
template.HTMLEscapeString(name), template.HTMLEscapeString(path),
|
||||
rpt.formatValue(flatSum), rpt.formatValue(cumSum),
|
||||
percentage(cumSum, rpt.total))
|
||||
}
|
||||
|
||||
// printFunctionSourceLine prints a source line and the corresponding assembly.
|
||||
func printFunctionSourceLine(w io.Writer, fn *node, assembly nodes, rpt *Report) {
|
||||
if len(assembly) == 0 {
|
||||
fmt.Fprintf(w,
|
||||
"<span class=line> %6d</span> <span class=nop> %10s %10s %s </span>\n",
|
||||
fn.info.lineno,
|
||||
valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
|
||||
template.HTMLEscapeString(fn.info.name))
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w,
|
||||
"<span class=line> %6d</span> <span class=deadsrc> %10s %10s %s </span>",
|
||||
fn.info.lineno,
|
||||
valueOrDot(fn.flat, rpt), valueOrDot(fn.cum, rpt),
|
||||
template.HTMLEscapeString(fn.info.name))
|
||||
fmt.Fprint(w, "<span class=asm>")
|
||||
for _, an := range assembly {
|
||||
var fileline string
|
||||
class := "disasmloc"
|
||||
if an.info.file != "" {
|
||||
fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.info.file), an.info.lineno)
|
||||
if an.info.lineno != fn.info.lineno {
|
||||
class = "unimportant"
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, " %8s %10s %10s %8x: %-48s <span class=%s>%s</span>\n", "",
|
||||
valueOrDot(an.flat, rpt), valueOrDot(an.cum, rpt),
|
||||
an.info.address,
|
||||
template.HTMLEscapeString(an.info.name),
|
||||
class,
|
||||
template.HTMLEscapeString(fileline))
|
||||
}
|
||||
fmt.Fprintln(w, "</span>")
|
||||
}
|
||||
|
||||
// printFunctionClosing prints the end of a function in a weblist report.
|
||||
func printFunctionClosing(w io.Writer) {
|
||||
fmt.Fprintln(w, "</pre>")
|
||||
}
|
||||
|
||||
// printPageClosing prints the end of the page in a weblist report.
|
||||
func printPageClosing(w io.Writer) {
|
||||
fmt.Fprintln(w, weblistPageClosing)
|
||||
}
|
||||
|
||||
// getFunctionSource collects the sources of a function from a source
|
||||
// file and annotates it with the samples in fns. Returns the sources
|
||||
// as nodes, using the info.name field to hold the source code.
|
||||
func getFunctionSource(fun, file string, fns nodes, start, end int) (nodes, string, error) {
|
||||
f, file, err := adjustSourcePath(file)
|
||||
if err != nil {
|
||||
return nil, file, err
|
||||
}
|
||||
|
||||
lineNodes := make(map[int]nodes)
|
||||
|
||||
// Collect source coordinates from profile.
|
||||
const margin = 5 // Lines before first/after last sample.
|
||||
if start == 0 {
|
||||
if fns[0].info.startLine != 0 {
|
||||
start = fns[0].info.startLine
|
||||
} else {
|
||||
start = fns[0].info.lineno - margin
|
||||
}
|
||||
} else {
|
||||
start -= margin
|
||||
}
|
||||
if end == 0 {
|
||||
end = fns[0].info.lineno
|
||||
}
|
||||
end += margin
|
||||
for _, n := range fns {
|
||||
lineno := n.info.lineno
|
||||
nodeStart := n.info.startLine
|
||||
if nodeStart == 0 {
|
||||
nodeStart = lineno - margin
|
||||
}
|
||||
nodeEnd := lineno + margin
|
||||
if nodeStart < start {
|
||||
start = nodeStart
|
||||
} else if nodeEnd > end {
|
||||
end = nodeEnd
|
||||
}
|
||||
lineNodes[lineno] = append(lineNodes[lineno], n)
|
||||
}
|
||||
|
||||
var src nodes
|
||||
buf := bufio.NewReader(f)
|
||||
lineno := 1
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, file, err
|
||||
}
|
||||
if line == "" {
|
||||
// end was at or past EOF; that's okay
|
||||
break
|
||||
}
|
||||
}
|
||||
if lineno >= start {
|
||||
flat, cum := sumNodes(lineNodes[lineno])
|
||||
|
||||
src = append(src, &node{
|
||||
info: nodeInfo{
|
||||
name: strings.TrimRight(line, "\n"),
|
||||
lineno: lineno,
|
||||
},
|
||||
flat: flat,
|
||||
cum: cum,
|
||||
})
|
||||
}
|
||||
lineno++
|
||||
if lineno > end {
|
||||
break
|
||||
}
|
||||
}
|
||||
return src, file, nil
|
||||
}
|
||||
|
||||
// getMissingFunctionSource creates a dummy function body to point to
|
||||
// the source file and annotates it with the samples in asm.
|
||||
func getMissingFunctionSource(filename string, asm map[int]nodes, start, end int) (nodes, string) {
|
||||
var fnodes nodes
|
||||
for i := start; i <= end; i++ {
|
||||
lrs := asm[i]
|
||||
if len(lrs) == 0 {
|
||||
continue
|
||||
}
|
||||
flat, cum := sumNodes(lrs)
|
||||
fnodes = append(fnodes, &node{
|
||||
info: nodeInfo{
|
||||
name: "???",
|
||||
lineno: i,
|
||||
},
|
||||
flat: flat,
|
||||
cum: cum,
|
||||
})
|
||||
}
|
||||
return fnodes, filename
|
||||
}
|
||||
|
||||
// adjustSourcePath adjusts the path for a source file by trimming
|
||||
// known prefixes and searching for the file on all parents of the
|
||||
// current working dir.
|
||||
func adjustSourcePath(path string) (*os.File, string, error) {
|
||||
path = trimPath(path)
|
||||
f, err := os.Open(path)
|
||||
if err == nil {
|
||||
return f, path, nil
|
||||
}
|
||||
|
||||
if dir, wderr := os.Getwd(); wderr == nil {
|
||||
for {
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
break
|
||||
}
|
||||
if f, err := os.Open(filepath.Join(parent, path)); err == nil {
|
||||
return f, filepath.Join(parent, path), nil
|
||||
}
|
||||
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
return nil, path, err
|
||||
}
|
||||
|
||||
// trimPath cleans up a path by removing prefixes that are commonly
|
||||
// found on profiles.
|
||||
func trimPath(path string) string {
|
||||
basePaths := []string{
|
||||
"/proc/self/cwd/./",
|
||||
"/proc/self/cwd/",
|
||||
}
|
||||
|
||||
sPath := filepath.ToSlash(path)
|
||||
|
||||
for _, base := range basePaths {
|
||||
if strings.HasPrefix(sPath, base) {
|
||||
return filepath.FromSlash(sPath[len(base):])
|
||||
}
|
||||
}
|
||||
return path
|
||||
}
|
|
@ -1,195 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package symbolizer provides a routine to populate a profile with
|
||||
// symbol, file and line number information. It relies on the
|
||||
// addr2liner and demangler packages to do the actual work.
|
||||
package symbolizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"cmd/pprof/internal/plugin"
|
||||
"internal/pprof/profile"
|
||||
)
|
||||
|
||||
// Symbolize adds symbol and line number information to all locations
|
||||
// in a profile. mode enables some options to control
|
||||
// symbolization. Currently only recognizes "force", which causes it
|
||||
// to overwrite any existing data.
|
||||
func Symbolize(mode string, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
|
||||
force := false
|
||||
// Disable some mechanisms based on mode string.
|
||||
for _, o := range strings.Split(strings.ToLower(mode), ":") {
|
||||
switch o {
|
||||
case "force":
|
||||
force = true
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
if len(prof.Mapping) == 0 {
|
||||
return fmt.Errorf("no known mappings")
|
||||
}
|
||||
|
||||
mt, err := newMapping(prof, obj, ui, force)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer mt.close()
|
||||
|
||||
functions := make(map[profile.Function]*profile.Function)
|
||||
for _, l := range mt.prof.Location {
|
||||
m := l.Mapping
|
||||
segment := mt.segments[m]
|
||||
if segment == nil {
|
||||
// Nothing to do
|
||||
continue
|
||||
}
|
||||
|
||||
stack, err := segment.SourceLine(l.Address)
|
||||
if err != nil || len(stack) == 0 {
|
||||
// No answers from addr2line
|
||||
continue
|
||||
}
|
||||
|
||||
l.Line = make([]profile.Line, len(stack))
|
||||
for i, frame := range stack {
|
||||
if frame.Func != "" {
|
||||
m.HasFunctions = true
|
||||
}
|
||||
if frame.File != "" {
|
||||
m.HasFilenames = true
|
||||
}
|
||||
if frame.Line != 0 {
|
||||
m.HasLineNumbers = true
|
||||
}
|
||||
f := &profile.Function{
|
||||
Name: frame.Func,
|
||||
SystemName: frame.Func,
|
||||
Filename: frame.File,
|
||||
}
|
||||
if fp := functions[*f]; fp != nil {
|
||||
f = fp
|
||||
} else {
|
||||
functions[*f] = f
|
||||
f.ID = uint64(len(mt.prof.Function)) + 1
|
||||
mt.prof.Function = append(mt.prof.Function, f)
|
||||
}
|
||||
l.Line[i] = profile.Line{
|
||||
Function: f,
|
||||
Line: int64(frame.Line),
|
||||
}
|
||||
}
|
||||
|
||||
if len(stack) > 0 {
|
||||
m.HasInlineFrames = true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newMapping creates a mappingTable for a profile.
|
||||
func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {
|
||||
mt := &mappingTable{
|
||||
prof: prof,
|
||||
segments: make(map[*profile.Mapping]plugin.ObjFile),
|
||||
}
|
||||
|
||||
// Identify used mappings
|
||||
mappings := make(map[*profile.Mapping]bool)
|
||||
for _, l := range prof.Location {
|
||||
mappings[l.Mapping] = true
|
||||
}
|
||||
|
||||
for _, m := range prof.Mapping {
|
||||
if !mappings[m] {
|
||||
continue
|
||||
}
|
||||
// Do not attempt to re-symbolize a mapping that has already been symbolized.
|
||||
if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {
|
||||
continue
|
||||
}
|
||||
|
||||
f, err := locateFile(obj, m.File, m.BuildID, m.Start)
|
||||
if err != nil {
|
||||
ui.PrintErr("Local symbolization failed for ", filepath.Base(m.File), ": ", err)
|
||||
// Move on to other mappings
|
||||
continue
|
||||
}
|
||||
|
||||
if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID {
|
||||
// Build ID mismatch - ignore.
|
||||
f.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
mt.segments[m] = f
|
||||
}
|
||||
|
||||
return mt, nil
|
||||
}
|
||||
|
||||
// locateFile opens a local file for symbolization on the search path
|
||||
// at $PPROF_BINARY_PATH. Looks inside these directories for files
|
||||
// named $BUILDID/$BASENAME and $BASENAME (if build id is available).
|
||||
func locateFile(obj plugin.ObjTool, file, buildID string, start uint64) (plugin.ObjFile, error) {
|
||||
// Construct search path to examine
|
||||
searchPath := os.Getenv("PPROF_BINARY_PATH")
|
||||
if searchPath == "" {
|
||||
// Use $HOME/pprof/binaries as default directory for local symbolization binaries
|
||||
searchPath = filepath.Join(os.Getenv("HOME"), "pprof", "binaries")
|
||||
}
|
||||
|
||||
// Collect names to search: {buildid/basename, basename}
|
||||
var fileNames []string
|
||||
if baseName := filepath.Base(file); buildID != "" {
|
||||
fileNames = []string{filepath.Join(buildID, baseName), baseName}
|
||||
} else {
|
||||
fileNames = []string{baseName}
|
||||
}
|
||||
for _, path := range filepath.SplitList(searchPath) {
|
||||
for nameIndex, name := range fileNames {
|
||||
file := filepath.Join(path, name)
|
||||
if f, err := obj.Open(file, start); err == nil {
|
||||
fileBuildID := f.BuildID()
|
||||
if buildID == "" || buildID == fileBuildID {
|
||||
return f, nil
|
||||
}
|
||||
f.Close()
|
||||
if nameIndex == 0 {
|
||||
// If this is the first name, the path includes the build id. Report inconsistency.
|
||||
return nil, fmt.Errorf("found file %s with inconsistent build id %s", file, fileBuildID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Try original file name
|
||||
f, err := obj.Open(file, start)
|
||||
if err == nil && buildID != "" {
|
||||
if fileBuildID := f.BuildID(); fileBuildID != "" && fileBuildID != buildID {
|
||||
// Mismatched build IDs, ignore
|
||||
f.Close()
|
||||
return nil, fmt.Errorf("mismatched build ids %s != %s", fileBuildID, buildID)
|
||||
}
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// mappingTable contains the mechanisms for symbolization of a
|
||||
// profile.
|
||||
type mappingTable struct {
|
||||
prof *profile.Profile
|
||||
segments map[*profile.Mapping]plugin.ObjFile
|
||||
}
|
||||
|
||||
// Close releases any external processes being used for the mapping.
|
||||
func (mt *mappingTable) close() {
|
||||
for _, segment := range mt.segments {
|
||||
segment.Close()
|
||||
}
|
||||
}
|
|
@ -1,111 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package symbolz symbolizes a profile using the output from the symbolz
|
||||
// service.
|
||||
package symbolz
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"internal/pprof/profile"
|
||||
)
|
||||
|
||||
var (
|
||||
symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`)
|
||||
)
|
||||
|
||||
// Symbolize symbolizes profile p by parsing data returned by a
|
||||
// symbolz handler. syms receives the symbolz query (hex addresses
|
||||
// separated by '+') and returns the symbolz output in a string. It
|
||||
// symbolizes all locations based on their addresses, regardless of
|
||||
// mapping.
|
||||
func Symbolize(source string, syms func(string, string) ([]byte, error), p *profile.Profile) error {
|
||||
if source = symbolz(source, p); source == "" {
|
||||
// If the source is not a recognizable URL, do nothing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct query of addresses to symbolize.
|
||||
var a []string
|
||||
for _, l := range p.Location {
|
||||
if l.Address != 0 && len(l.Line) == 0 {
|
||||
a = append(a, fmt.Sprintf("%#x", l.Address))
|
||||
}
|
||||
}
|
||||
|
||||
if len(a) == 0 {
|
||||
// No addresses to symbolize.
|
||||
return nil
|
||||
}
|
||||
lines := make(map[uint64]profile.Line)
|
||||
functions := make(map[string]*profile.Function)
|
||||
if b, err := syms(source, strings.Join(a, "+")); err == nil {
|
||||
buf := bytes.NewBuffer(b)
|
||||
for {
|
||||
l, err := buf.ReadString('\n')
|
||||
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 {
|
||||
addr, err := strconv.ParseUint(symbol[1], 0, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err)
|
||||
}
|
||||
|
||||
name := symbol[2]
|
||||
fn := functions[name]
|
||||
if fn == nil {
|
||||
fn = &profile.Function{
|
||||
ID: uint64(len(p.Function) + 1),
|
||||
Name: name,
|
||||
SystemName: name,
|
||||
}
|
||||
functions[name] = fn
|
||||
p.Function = append(p.Function, fn)
|
||||
}
|
||||
|
||||
lines[addr] = profile.Line{Function: fn}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range p.Location {
|
||||
if line, ok := lines[l.Address]; ok {
|
||||
l.Line = []profile.Line{line}
|
||||
if l.Mapping != nil {
|
||||
l.Mapping.HasFunctions = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// symbolz returns the corresponding symbolz source for a profile URL.
|
||||
func symbolz(source string, p *profile.Profile) string {
|
||||
if url, err := url.Parse(source); err == nil && url.Host != "" {
|
||||
if last := strings.LastIndex(url.Path, "/"); last != -1 {
|
||||
if strings.HasSuffix(url.Path[:last], "pprof") {
|
||||
url.Path = url.Path[:last] + "/symbol"
|
||||
} else {
|
||||
url.Path = url.Path[:last] + "/symbolz"
|
||||
}
|
||||
return url.String()
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tempfile provides tools to create and delete temporary files
|
||||
package tempfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// New returns an unused filename for output files.
|
||||
func New(dir, prefix, suffix string) (*os.File, error) {
|
||||
for index := 1; index < 10000; index++ {
|
||||
path := filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix))
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return os.Create(path)
|
||||
}
|
||||
}
|
||||
// Give up
|
||||
return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix)
|
||||
}
|
||||
|
||||
var tempFiles []string
|
||||
var tempFilesMu = sync.Mutex{}
|
||||
|
||||
// DeferDelete marks a file or directory to be deleted by next call to Cleanup.
|
||||
func DeferDelete(path string) {
|
||||
tempFilesMu.Lock()
|
||||
tempFiles = append(tempFiles, path)
|
||||
tempFilesMu.Unlock()
|
||||
}
|
||||
|
||||
// Cleanup removes any temporary files or directories selected for deferred cleaning.
|
||||
// Similar to defer semantics, the nodes are deleted in LIFO order.
|
||||
func Cleanup() {
|
||||
tempFilesMu.Lock()
|
||||
for i := len(tempFiles) - 1; i >= 0; i-- {
|
||||
os.Remove(tempFiles[i])
|
||||
}
|
||||
tempFiles = nil
|
||||
tempFilesMu.Unlock()
|
||||
}
|
|
@ -2,137 +2,138 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// pprof is a tool for visualization of profile.data. It is based on
|
||||
// the upstream version at github.com/google/pprof, with minor
|
||||
// modifications specific to the Go distribution. Please consider
|
||||
// upstreaming any modifications to these packages.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"debug/dwarf"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cmd/internal/objfile"
|
||||
"cmd/pprof/internal/commands"
|
||||
"cmd/pprof/internal/driver"
|
||||
"cmd/pprof/internal/fetch"
|
||||
"cmd/pprof/internal/plugin"
|
||||
"cmd/pprof/internal/symbolizer"
|
||||
"cmd/pprof/internal/symbolz"
|
||||
"internal/pprof/profile"
|
||||
"github.com/google/pprof/driver"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var extraCommands map[string]*commands.Command // no added Go-specific commands
|
||||
if err := driver.PProf(flags{}, fetch.Fetcher, symbolize, new(objTool), plugin.StandardUI(), extraCommands); err != nil {
|
||||
options := &driver.Options{
|
||||
Fetch: new(fetcher),
|
||||
Obj: new(objTool),
|
||||
}
|
||||
if err := driver.PProf(options); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
|
||||
// symbolize attempts to symbolize profile p.
|
||||
// If the source is a local binary, it tries using symbolizer and obj.
|
||||
// If the source is a URL, it fetches symbol information using symbolz.
|
||||
func symbolize(mode, source string, p *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
|
||||
remote, local := true, true
|
||||
for _, o := range strings.Split(strings.ToLower(mode), ":") {
|
||||
switch o {
|
||||
case "none", "no":
|
||||
return nil
|
||||
case "local":
|
||||
remote, local = false, true
|
||||
case "remote":
|
||||
remote, local = true, false
|
||||
default:
|
||||
ui.PrintErr("ignoring unrecognized symbolization option: " + mode)
|
||||
ui.PrintErr("expecting -symbolize=[local|remote|none][:force]")
|
||||
fallthrough
|
||||
case "", "force":
|
||||
// -force is recognized by symbolizer.Symbolize.
|
||||
// If the source is remote, and the mapping file
|
||||
// does not exist, don't use local symbolization.
|
||||
if isRemote(source) {
|
||||
if len(p.Mapping) == 0 {
|
||||
local = false
|
||||
} else if _, err := os.Stat(p.Mapping[0].File); err != nil {
|
||||
local = false
|
||||
}
|
||||
type fetcher struct {
|
||||
}
|
||||
|
||||
func (f *fetcher) Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) {
|
||||
sourceURL, timeout := adjustURL(src, duration, timeout)
|
||||
if sourceURL == "" {
|
||||
// Could not recognize URL, let regular pprof attempt to fetch the profile (e.g. from a file)
|
||||
return nil, "", nil
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, "Fetching profile over HTTP from", sourceURL)
|
||||
if duration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Please wait... (%v)\n", duration)
|
||||
}
|
||||
p, err := getProfile(sourceURL, timeout)
|
||||
return p, sourceURL, err
|
||||
}
|
||||
|
||||
func getProfile(source string, timeout time.Duration) (*profile.Profile, error) {
|
||||
url, err := url.Parse(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tlsConfig *tls.Config
|
||||
if url.Scheme == "https+insecure" {
|
||||
tlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
url.Scheme = "https"
|
||||
source = url.String()
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
ResponseHeaderTimeout: timeout + 5*time.Second,
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
resp, err := client.Get(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("server response: %s", resp.Status)
|
||||
}
|
||||
return profile.Parse(resp.Body)
|
||||
}
|
||||
|
||||
// cpuProfileHandler is the Go pprof CPU profile handler URL.
|
||||
const cpuProfileHandler = "/debug/pprof/profile"
|
||||
|
||||
// adjustURL applies the duration/timeout values and Go specific defaults
|
||||
func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) {
|
||||
u, err := url.Parse(source)
|
||||
if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") {
|
||||
// Try adding http:// to catch sources of the form hostname:port/path.
|
||||
// url.Parse treats "hostname" as the scheme.
|
||||
u, err = url.Parse("http://" + source)
|
||||
}
|
||||
if err != nil || u.Host == "" {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
if u.Path == "" || u.Path == "/" {
|
||||
u.Path = cpuProfileHandler
|
||||
}
|
||||
|
||||
// Apply duration/timeout overrides to URL.
|
||||
values := u.Query()
|
||||
if duration > 0 {
|
||||
values.Set("seconds", fmt.Sprint(int(duration.Seconds())))
|
||||
} else {
|
||||
if urlSeconds := values.Get("seconds"); urlSeconds != "" {
|
||||
if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil {
|
||||
duration = time.Duration(us) * time.Second
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
if local {
|
||||
// Symbolize using binutils.
|
||||
if err = symbolizer.Symbolize(mode, p, obj, ui); err == nil {
|
||||
return nil
|
||||
if timeout <= 0 {
|
||||
if duration > 0 {
|
||||
timeout = duration + duration/2
|
||||
} else {
|
||||
timeout = 60 * time.Second
|
||||
}
|
||||
}
|
||||
if remote {
|
||||
err = symbolz.Symbolize(source, fetch.PostURL, p)
|
||||
}
|
||||
return err
|
||||
u.RawQuery = values.Encode()
|
||||
return u.String(), timeout
|
||||
}
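To make the rewriting above concrete, here is a standalone sketch that mirrors what adjustURL is expected to produce for a bare host:port source; it is an illustration of the expected transformation, not the function itself, and the host and duration are made up:

```go
// Standalone sketch mirroring adjustURL's handling of a bare host:port source.
package main

import (
	"fmt"
	"net/url"
	"time"
)

func main() {
	// "localhost:6060" has no scheme, so http:// is prepended first.
	u, _ := url.Parse("http://" + "localhost:6060")
	if u.Path == "" || u.Path == "/" {
		u.Path = "/debug/pprof/profile" // Go's default CPU profile handler
	}
	duration := 30 * time.Second
	v := u.Query()
	v.Set("seconds", fmt.Sprint(int(duration.Seconds())))
	u.RawQuery = v.Encode()

	// Prints: http://localhost:6060/debug/pprof/profile?seconds=30
	fmt.Println(u.String())
}
```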
|
||||
|
||||
// isRemote returns whether source is a URL for a remote source.
|
||||
func isRemote(source string) bool {
|
||||
url, err := url.Parse(source)
|
||||
if err != nil {
|
||||
url, err = url.Parse("http://" + source)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if scheme := strings.ToLower(url.Scheme); scheme == "" || scheme == "file" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// flags implements the driver.FlagPackage interface using the builtin flag package.
|
||||
type flags struct {
|
||||
}
|
||||
|
||||
func (flags) Bool(o string, d bool, c string) *bool {
|
||||
return flag.Bool(o, d, c)
|
||||
}
|
||||
|
||||
func (flags) Int(o string, d int, c string) *int {
|
||||
return flag.Int(o, d, c)
|
||||
}
|
||||
|
||||
func (flags) Float64(o string, d float64, c string) *float64 {
|
||||
return flag.Float64(o, d, c)
|
||||
}
|
||||
|
||||
func (flags) String(o, d, c string) *string {
|
||||
return flag.String(o, d, c)
|
||||
}
|
||||
|
||||
func (flags) Parse(usage func()) []string {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) == 0 {
|
||||
usage()
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func (flags) ExtraUsage() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// objTool implements plugin.ObjTool using Go libraries
|
||||
// objTool implements driver.ObjTool using Go libraries
|
||||
// (instead of invoking GNU binutils).
|
||||
type objTool struct {
|
||||
mu sync.Mutex
|
||||
disasmCache map[string]*objfile.Disasm
|
||||
}
|
||||
|
||||
func (*objTool) Open(name string, start uint64) (plugin.ObjFile, error) {
|
||||
func (*objTool) Open(name string, start, limit, offset uint64) (driver.ObjFile, error) {
|
||||
of, err := objfile.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -154,14 +155,14 @@ func (*objTool) Demangle(names []string) (map[string]string, error) {
|
|||
return make(map[string]string), nil
|
||||
}
|
||||
|
||||
func (t *objTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
|
||||
func (t *objTool) Disasm(file string, start, end uint64) ([]driver.Inst, error) {
|
||||
d, err := t.cachedDisasm(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var asm []plugin.Inst
|
||||
var asm []driver.Inst
|
||||
d.Decode(start, end, nil, func(pc, size uint64, file string, line int, text string) {
|
||||
asm = append(asm, plugin.Inst{Addr: pc, File: file, Line: line, Text: text})
|
||||
asm = append(asm, driver.Inst{Addr: pc, File: file, Line: line, Text: text})
|
||||
})
|
||||
return asm, nil
|
||||
}
|
||||
|
@ -194,7 +195,7 @@ func (*objTool) SetConfig(config string) {
|
|||
// Ignore entirely.
|
||||
}
|
||||
|
||||
// file implements plugin.ObjFile using Go libraries
|
||||
// file implements driver.ObjFile using Go libraries
|
||||
// (instead of invoking GNU binutils).
|
||||
// A file represents a single executable being analyzed.
|
||||
type file struct {
|
||||
|
@ -222,7 +223,7 @@ func (f *file) BuildID() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
func (f *file) SourceLine(addr uint64) ([]driver.Frame, error) {
|
||||
if f.pcln == nil {
|
||||
pcln, err := f.file.PCLineTable()
|
||||
if err != nil {
|
||||
|
@ -233,7 +234,7 @@ func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
|||
addr -= f.offset
|
||||
file, line, fn := f.pcln.PCToLine(addr)
|
||||
if fn != nil {
|
||||
frame := []plugin.Frame{
|
||||
frame := []driver.Frame{
|
||||
{
|
||||
Func: fn.Name,
|
||||
File: file,
|
||||
|
@ -254,7 +255,7 @@ func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
|||
// dwarfSourceLine tries to get file/line information using DWARF.
|
||||
// This is for C functions that appear in the profile.
|
||||
// Returns nil if there is no information available.
|
||||
func (f *file) dwarfSourceLine(addr uint64) []plugin.Frame {
|
||||
func (f *file) dwarfSourceLine(addr uint64) []driver.Frame {
|
||||
if f.dwarf == nil && !f.triedDwarf {
|
||||
// Ignore any error--we don't care exactly why there
|
||||
// is no DWARF info.
|
||||
|
@ -277,7 +278,7 @@ func (f *file) dwarfSourceLine(addr uint64) []plugin.Frame {
|
|||
|
||||
// dwarfSourceLineEntry tries to get file/line information from a
|
||||
// DWARF compilation unit. Returns nil if it doesn't find anything.
|
||||
func (f *file) dwarfSourceLineEntry(r *dwarf.Reader, entry *dwarf.Entry, addr uint64) []plugin.Frame {
|
||||
func (f *file) dwarfSourceLineEntry(r *dwarf.Reader, entry *dwarf.Entry, addr uint64) []driver.Frame {
|
||||
lines, err := f.dwarf.LineReader(entry)
|
||||
if err != nil {
|
||||
return nil
|
||||
|
@ -311,7 +312,7 @@ FindName:
|
|||
|
||||
// TODO: Report inlined functions.
|
||||
|
||||
frames := []plugin.Frame{
|
||||
frames := []driver.Frame{
|
||||
{
|
||||
Func: name,
|
||||
File: lentry.File.Name,
|
||||
|
@ -322,7 +323,7 @@ FindName:
|
|||
return frames
|
||||
}
|
||||
|
||||
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
||||
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*driver.Sym, error) {
|
||||
if f.sym == nil {
|
||||
sym, err := f.file.Symbols()
|
||||
if err != nil {
|
||||
|
@ -330,7 +331,7 @@ func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
|||
}
|
||||
f.sym = sym
|
||||
}
|
||||
var out []*plugin.Sym
|
||||
var out []*driver.Sym
|
||||
for _, s := range f.sym {
|
||||
// Ignore a symbol with address 0 and size 0.
|
||||
// An ELF STT_FILE symbol will look like that.
|
||||
|
@ -338,7 +339,7 @@ func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
|||
continue
|
||||
}
|
||||
if (r == nil || r.MatchString(s.Name)) && (addr == 0 || s.Addr <= addr && addr < s.Addr+uint64(s.Size)) {
|
||||
out = append(out, &plugin.Sym{
|
||||
out = append(out, &driver.Sym{
|
||||
Name: []string{s.Name},
|
||||
File: f.name,
|
||||
Start: s.Addr,
|
||||
|
|
7
src/cmd/vendor/github.com/google/pprof/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
# This is the official list of pprof authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
Google Inc.
|
27
src/cmd/vendor/github.com/google/pprof/CONTRIBUTING
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Want to contribute? Great! First, read this page (including the small print at the end).
|
||||
|
||||
### Before you contribute
|
||||
Before we can use your code, you must sign the
|
||||
[Google Individual Contributor License Agreement]
|
||||
(https://cla.developers.google.com/about/google-individual)
|
||||
(CLA), which you can do online. The CLA is necessary mainly because you own the
|
||||
copyright to your changes, even after your contribution becomes part of our
|
||||
codebase, so we need your permission to use and distribute your code. We also
|
||||
need to be sure of various other things—for instance that you'll tell us if you
|
||||
know that your code infringes on other people's patents. You don't have to sign
|
||||
the CLA until after you've submitted your code for review and a member has
|
||||
approved it, but you must do it before we can put your code into our codebase.
|
||||
Before you start working on a larger contribution, you should get in touch with
|
||||
us first through the issue tracker with your idea so that we can help out and
|
||||
possibly guide you. Coordinating up front makes it much easier to avoid
|
||||
frustration later on.
|
||||
|
||||
### Code reviews
|
||||
All submissions, including submissions by project members, require review. We
|
||||
use Github pull requests for this purpose.
|
||||
|
||||
### The small print
|
||||
Contributions made by corporations are covered by a different agreement than
|
||||
the one above, the
|
||||
[Software Grant and Corporate Contributor License Agreement]
|
||||
(https://cla.developers.google.com/about/google-corporate).
|
14
src/cmd/vendor/github.com/google/pprof/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
# People who have agreed to one of the CLAs and can contribute patches.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# https://developers.google.com/open-source/cla/individual
|
||||
# https://developers.google.com/open-source/cla/corporate
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name <email address>
|
||||
Raul Silvera <rsilvera@google.com>
|
||||
Tipp Moseley <tipp@google.com>
|
||||
Hyoun Kyu Cho <netforce@google.com>
|
||||
|
202
src/cmd/vendor/github.com/google/pprof/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
86
src/cmd/vendor/github.com/google/pprof/README.md
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
# Introduction
|
||||
|
||||
pprof is a tool for visualization and analysis of profiling data.
|
||||
|
||||
pprof reads a collection of profiling samples in profile.proto format and
|
||||
generates reports to visualize and help analyze the data. It can generate both
|
||||
text and graphical reports (through the use of the dot visualization package).
|
||||
|
||||
profile.proto is a protocol buffer that describes a set of callstacks
|
||||
and symbolization information. A common usage is to represent a set of
|
||||
sampled callstacks from statistical profiling. The format is
|
||||
described in the [proto/profile.proto](./proto/profile.proto) file. For details on protocol
|
||||
buffers, see https://developers.google.com/protocol-buffers
|
||||
|
||||
Profiles can be read from a local file, or over http. Multiple
|
||||
profiles of the same type can be aggregated or compared.
|
||||
|
||||
If the profile samples contain machine addresses, pprof can symbolize
|
||||
them through the use of the native binutils tools (addr2line and nm).
|
||||
|
||||
**This is not an official Google product.**
|
||||
|
||||
# Building pprof
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Go development kit. Known to work with Go 1.5.
|
||||
Follow [these instructions](http://golang.org/doc/code.html) to install the
|
||||
go tool and set up GOPATH.
|
||||
|
||||
- Graphviz: http://www.graphviz.org/
|
||||
Optional, used to generate graphic visualizations of profiles
|
||||
|
||||
To build and install it, use the `go get` tool.
|
||||
|
||||
go get github.com/google/pprof
|
||||
|
||||
# Basic usage
|
||||
|
||||
pprof can read a profile from a file or directly from a server via http.
|
||||
Specify the profile input(s) in the command line, and use options to
|
||||
indicate how to format the report.
|
||||
|
||||
## Generate a text report of the profile, sorted by hotness:
|
||||
|
||||
```
|
||||
% pprof -top [main_binary] profile.pb.gz
|
||||
Where
|
||||
main_binary: Local path to the main program binary, to enable symbolization
|
||||
profile.pb.gz: Local path to the profile in a compressed protobuf, or
|
||||
URL to the http service that serves a profile.
|
||||
```
|
||||
|
||||
## Generate a graph in an SVG file, and open it with a web browser:
|
||||
|
||||
```
|
||||
pprof -web [main_binary] profile.pb.gz
|
||||
```
|
||||
|
||||
## Run pprof in interactive mode:

If no output formatting option is specified, pprof runs in interactive mode,
where it reads the profile and accepts interactive commands for visualization and
refinement of the profile.
|
||||
|
||||
```
|
||||
pprof [main_binary] profile.pb.gz
|
||||
|
||||
This will open a simple shell that takes pprof commands to generate reports.
|
||||
Type 'help' for available commands/options.
|
||||
```
|
||||
|
||||
## Using pprof with Linux Perf
|
||||
|
||||
pprof can read `perf.data` files generated by the
|
||||
[Linux perf](https://perf.wiki.kernel.org/index.php) tool by using the
|
||||
`perf_to_profile` program from the
|
||||
[perf_data_converter](http://github.com/google/perf_data_converter) package.
|
||||
|
||||
## Further documentation
|
||||
|
||||
See [doc/pprof.md](doc/pprof.md) for more detailed end-user documentation.
|
||||
|
||||
See [doc/developer/pprof.dev.md](doc/developer/pprof.dev.md) for developer documentation.
|
||||
|
||||
See [doc/developer/profile.proto.md](doc/developer/profile.proto.md) for a description of the profile.proto format.
|
14
src/cmd/vendor/github.com/google/pprof/doc/developer/pprof.dev.md
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
This is pprof's developer documentation. It discusses how to maintain and extend
|
||||
pprof. It has yet to be written.
|
||||
|
||||
# How is pprof code structured?
|
||||
|
||||
Internal vs external packages.
|
||||
|
||||
# External interface
|
||||
|
||||
## Plugins
|
||||
|
||||
## Legacy formats
|
||||
|
||||
# Overview of internal packages
|
147
src/cmd/vendor/github.com/google/pprof/doc/developer/profile.proto.md
generated
vendored
Normal file
|
@ -0,0 +1,147 @@
|
|||
This is a description of the profile.proto format.
|
||||
|
||||
# Overview
|
||||
|
||||
Profile.proto is a data representation for profile data. It is independent of
|
||||
the type of data being collected and the sampling process used to collect that
|
||||
data. On disk, it is represented as a gzip-compressed protocol buffer, described
|
||||
at src/proto/profile.proto
|
||||
|
||||
A profile in this context refers to a collection of samples, each one
|
||||
representing measurements performed at a certain point in the life of a job. A
|
||||
sample associates a set of measurement values with a list of locations, commonly
|
||||
representing the program call stack when the sample was taken.
|
||||
|
||||
Tools such as pprof analyze these samples and display this information in
|
||||
multiple forms, such as identifying hottest locations, building graphical call
|
||||
graphs or trees, etc.
|
||||
|
||||
# General structure of a profile
|
||||
|
||||
A profile is represented as a Profile message, which contains the following
fields:
|
||||
|
||||
* *sample*: A profile sample, with the values measured and the associated call
|
||||
stack as a list of location ids. Samples with identical call stacks can be
|
||||
merged by adding their respective values, element by element.
|
||||
* *location*: A unique place in the program, commonly mapped to a single
|
||||
instruction address. It has a unique nonzero id, to be referenced from the
|
||||
samples. It contains source information in the form of lines, and a mapping id
|
||||
that points to a binary.
|
||||
* *function*: A program function as defined in the program source. It has a
|
||||
unique nonzero id, referenced from the location lines. It contains a
|
||||
human-readable name for the function (eg a C++ demangled name), a system name
|
||||
(eg a C++ mangled name), the name of the corresponding source file, and other
|
||||
function attributes.
|
||||
* *mapping*: A binary that is part of the program during the profile
|
||||
collection. It has a unique nonzero id, referenced from the locations. It
|
||||
includes details on how the binary was mapped during program execution. By
|
||||
convention the main program binary is the first mapping, followed by any
|
||||
shared libraries.
|
||||
* *string_table*: All strings in the profile are represented as indices into
|
||||
this repeating field. The first string is empty, so index == 0 always
|
||||
represents the empty string.
|
||||
|
||||
# Measurement values
|
||||
|
||||
Measurement values are represented as 64-bit integers. The profile contains an
|
||||
explicit description of each value represented, using a ValueType message, with
|
||||
two fields:
|
||||
|
||||
* *Type*: A human-readable description of the type semantics. For example “cpu”
|
||||
to represent CPU time, “wall” or “time” for wallclock time, or “memory” for
|
||||
bytes allocated.
|
||||
* *Unit*: A human-readable name of the unit represented by the 64-bit integer
|
||||
values. For example, it could be “nanoseconds” or “milliseconds” for a time
|
||||
value, or “bytes” or “megabytes” for a memory size. If this is just
|
||||
representing a number of events, the recommended unit name is “count”.
|
||||
|
||||
A profile can represent multiple measurements per sample, but all samples must
|
||||
have the same number and type of measurements. The actual values are stored in
|
||||
the Sample.value fields, each one described by the corresponding
|
||||
Profile.sample_type field.
|
||||
|
||||
Some profiles have a uniform period that describes the granularity of the data
|
||||
collection. For example, a CPU profile may have a period of 100ms, or a memory
|
||||
allocation profile may have a period of 512kb. Profiles can optionally describe
|
||||
such a value on the Profile.period and Profile.period_type fields. The profile
|
||||
period is meant for human consumption and does not affect the interpretation of
|
||||
the profiling data.
|
||||
|
||||
By convention, the first value on all profiles is the number of samples
|
||||
collected at this call stack, with unit “count”. Because the profile does not
|
||||
describe the sampling process beyond the optional period, it must include
|
||||
unsampled values for all measurements. For example, a CPU profile could have
|
||||
value[0] == samples, and value[1] == time in milliseconds.
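As a rough illustration of these conventions, the sketch below builds a minimal two-value CPU profile in memory using the vendored github.com/google/pprof/profile Go package; the function name, line number, and values are made up:

```go
// Sketch: a minimal two-value CPU profile built with the vendored profile package.
package main

import (
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	fn := &profile.Function{ID: 1, Name: "main.work", SystemName: "main.work", Filename: "work.go"}
	loc := &profile.Location{ID: 1, Line: []profile.Line{{Function: fn, Line: 42}}}

	p := &profile.Profile{
		// By convention the first value is the sample count ("count"),
		// followed by the measurement itself (here CPU time in milliseconds).
		SampleType: []*profile.ValueType{
			{Type: "samples", Unit: "count"},
			{Type: "cpu", Unit: "milliseconds"},
		},
		PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"},
		Period:     10, // e.g. one sample every 10ms
		Function:   []*profile.Function{fn},
		Location:   []*profile.Location{loc},
		Sample: []*profile.Sample{
			{Location: []*profile.Location{loc}, Value: []int64{3, 30}}, // 3 samples, 30ms
		},
	}

	// Write emits the gzip-compressed protocol buffer form described above.
	_ = p.Write(os.Stdout)
}
```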
|
||||
|
||||
## Locations, functions and mappings
|
||||
|
||||
Each sample lists the id of each location where the sample was collected, in
|
||||
bottom-up order. Each location has an explicit unique nonzero integer id,
|
||||
independent of its position in the profile, and holds additional information to
|
||||
identify the corresponding source.
|
||||
|
||||
The profile source is expected to perform any adjustment required to the
|
||||
locations in order to point to the calls in the stack. For example, if the
|
||||
profile source extracts the call stack by walking back over the program stack,
|
||||
it must adjust the instruction addresses to point to the actual call
|
||||
instruction, instead of the instruction that each call will return to.
|
||||
|
||||
Sources usually generate profiles that fall into these two categories:
|
||||
|
||||
* *Unsymbolized profiles*: These only contain instruction addresses, and are to
|
||||
be symbolized by a separate tool. It is critical for each location to point to
|
||||
a valid mapping, which will provide the information required for
|
||||
symbolization. These are used for profiles of compiled languages, such as C++
|
||||
and Go.
|
||||
|
||||
* *Symbolized profiles*: These contain all the symbol information available for
|
||||
the profile. Mappings and instruction addresses are optional for symbolized
|
||||
locations. These are used for profiles of interpreted or jitted languages,
|
||||
such as Java or Python. Also, the profile format allows the generation of
|
||||
mixed profiles, with symbolized and unsymbolized locations.
|
||||
|
||||
The symbol information is represented in the repeating lines field of the
|
||||
Location message. A location has multiple lines if it reflects multiple program
|
||||
sources, for example if representing inlined call stacks. Lines reference
|
||||
functions by their unique nonzero id, and the source line number within the
|
||||
source file listed by the function. A function contains the source attributes
|
||||
for a function, including its name, source file, etc. Functions include both a
|
||||
user and a system form of the name, for example to include C++ demangled and
|
||||
mangled names. For profiles where only a single name exists, both should be set
|
||||
to the same string.
|
||||
|
||||
Mappings are also referenced from locations by their unique nonzero id, and
|
||||
include all information needed to symbolize addresses within the mapping. It
|
||||
includes similar information to the Linux /proc/self/maps file. Locations
|
||||
associated to a mapping should have addresses that land between the mapping
|
||||
start and limit. Also, if available, mappings should include a build id to
|
||||
uniquely identify the version of the binary being used.
|
||||
|
||||
## Labels
|
||||
|
||||
Samples optionally contain labels, which are annotations to discriminate samples
|
||||
with identical locations. For example, a label can be used on a malloc profile
|
||||
to indicate allocation size, so two samples on the same call stack with sizes
|
||||
2MB and 4MB do not get merged into a single sample with two allocations and a
|
||||
size of 6MB.
|
||||
|
||||
Labels can be string-based or numeric. They are represented by the Label
|
||||
message, with a key identifying the label and either a string or numeric
|
||||
value. For numeric labels, by convention the key represents the measurement unit
|
||||
of the numeric value. So for the previous example, the samples would have labels
|
||||
{“bytes”, 2097152} and {“bytes”, 4194304}.
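In the vendored Go profile package these annotations correspond to the Sample.Label and Sample.NumLabel fields; a minimal sketch using the sizes from the example above:

```go
// Sketch: two samples on the same (elided) stack, kept distinct by a "bytes" label.
package main

import "github.com/google/pprof/profile"

func main() {
	a := &profile.Sample{Value: []int64{1, 2097152}, NumLabel: map[string][]int64{"bytes": {2097152}}}
	b := &profile.Sample{Value: []int64{1, 4194304}, NumLabel: map[string][]int64{"bytes": {4194304}}}
	_, _ = a, b
}
```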
|
||||
|
||||
## Keep and drop expressions
|
||||
|
||||
Some profile sources may have knowledge of locations that are uninteresting or
|
||||
irrelevant. However, if symbolization is needed in order to identify these
|
||||
locations, the profile source may not be able to remove them when the profile is
|
||||
generated. The profile format provides a mechanism to identify these frames by
|
||||
name, through regular expressions.
|
||||
|
||||
These expressions must match the function name in its entirety. Frames that
|
||||
match Profile.drop\_frames will be dropped from the profile, along with any
|
||||
frames below it. Frames that match Profile.keep\_frames will be kept, even if
|
||||
they match drop\_frames.
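A sketch of setting these expressions through the vendored Go profile package; the regular expressions here are illustrative only:

```go
// Sketch: keep/drop expressions on a profile. Regexps must match the whole
// function name; frames matching DropFrames (and the frames below them) are
// removed, unless they also match KeepFrames.
package main

import "github.com/google/pprof/profile"

func main() {
	p := &profile.Profile{}
	p.DropFrames = `runtime\..*`
	p.KeepFrames = `runtime\.mallocgc`
	_ = p
}
```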
|
||||
|
209
src/cmd/vendor/github.com/google/pprof/doc/pprof.md
generated
vendored
Normal file
|
@ -0,0 +1,209 @@
|
|||
# pprof
|
||||
|
||||
pprof is a tool for visualization and analysis of profiling data.
|
||||
|
||||
pprof reads a collection of profiling samples in profile.proto format and
|
||||
generates reports to visualize and help analyze the data. It can generate both
|
||||
text and graphical reports (through the use of the dot visualization package).
|
||||
|
||||
profile.proto is a protocol buffer that describes a set of callstacks
|
||||
and symbolization information. A common usage is to represent a set of
|
||||
sampled callstacks from statistical profiling. The format is
|
||||
described in the src/proto/profile.proto file. For details on protocol
|
||||
buffers, see https://developers.google.com/protocol-buffers
|
||||
|
||||
Profiles can be read from a local file, or over http. Multiple
|
||||
profiles of the same type can be aggregated or compared.
|
||||
|
||||
If the profile samples contain machine addresses, pprof can symbolize
|
||||
them through the use of the native binutils tools (addr2line and nm).
|
||||
|
||||
# pprof profiles
|
||||
|
||||
pprof operates on data in the profile.proto format. Each profile is a collection
of samples, where each sample is associated with a point in a location hierarchy,
one or more numeric values, and a set of labels. Often these profiles represent
data collected through statistical sampling of a program, so each sample
describes a program call stack and a number or weight of samples collected at a
location. pprof is agnostic to the profile semantics, so other uses are
possible. The interpretation of the reports generated by pprof depends on the
semantics defined by the source of the profile.
|
||||
|
||||
# General usage
|
||||
|
||||
The objective of pprof is to generate a report for a profile. The report is
|
||||
generated from a location hierarchy, which is reconstructed from the profile
|
||||
samples. Each location contains two values: *flat* is the value of the location
|
||||
itself, while *cum* is the value of the location plus all its
|
||||
descendants. Samples that include a location multiple times (eg for recursive
|
||||
functions) are counted only once per location.
|
||||
|
||||
The basic usage of pprof is
|
||||
|
||||
pprof <format> [options] source
|
||||
|
||||
Where *format* selects the nature of the report, and *options* configure the
|
||||
contents of the report. Each option has a value, which can be boolean, numeric,
|
||||
or strings. While only one format can be specified, most options can be selected
|
||||
independently of each other.
|
||||
|
||||
Some common pprof options are:
|
||||
|
||||
* **-flat [default]:** Sort entries based on their flat weight, on text reports.
|
||||
* **-cum:** Sort entries based on cumulative weight, on text reports.
|
||||
* **-functions [default]:** Accumulate samples at the function level; profile
|
||||
locations that describe the same function will be merged into a report entry.
|
||||
* **-lines:** Accumulate samples at the source line level; profile locations that
describe the same source line will be merged into a report entry.
* **-addresses:** Accumulate samples at the instruction address; profile locations
that describe the same address will be merged into a report entry.
|
||||
* **-nodecount= _int_:** Maximum number of entries in the report. pprof will only print
|
||||
this many entries and will use heuristics to select which entries to trim.
|
||||
* **-focus= _regex_:** Only include samples that include a report entry matching
|
||||
*regex*.
|
||||
* **-ignore= _regex_:** Do not include samples that include a report entry matching
|
||||
*regex*.
|
||||
* **-show= _regex_:** Only show entries that match *regex*.
|
||||
* **-hide= _regex_:** Do not show entries that match *regex*.
|
||||
|
||||
Each sample in a profile may include multiple values, representing different
|
||||
entities associated with the sample. pprof reports include a single sample value,
|
||||
which by convention is the last one specified in the report. The `sample_index=`
|
||||
option selects which value to use, and can be set to a number (from 0 to the
|
||||
number of values - 1) or the name of the sample value.
|
||||
|
||||
Sample values are numeric values associated with a unit. If pprof can recognize
these units, it will attempt to scale the values to a suitable unit for
visualization. The `unit=` option will force the use of a specific unit. For
example, `unit=sec` will force any time values to be reported in
seconds. pprof recognizes most common time and memory size units.
|
||||
|
||||
## Text reports
|
||||
|
||||
pprof text reports show the location hierarchy in text format.
|
||||
|
||||
* **-text:** Prints the location entries, one per line, including the flat and cum
|
||||
values.
|
||||
* **-tree:** Prints each location entry with its predecessors and successors.
|
||||
* **-peek= _regex_:** Print the location entry with all its predecessors and
|
||||
successors, without trimming any entries.
|
||||
* **-traces:** Prints each sample with a location per line.
|
||||
|
||||
## Graphical reports
|
||||
|
||||
pprof can generate graphical reports on the DOT format, and convert them to
|
||||
multiple formats using the graphviz package.
|
||||
|
||||
These reports represent the location hierarchy as a graph, with a report entry
|
||||
represented as a node. Solid edges represent a direct connection between
|
||||
entries, while dotted edges represent a connection where some intermediate nodes
|
||||
have been removed. Nodes are removed using heuristics to limit the size of
|
||||
the graph, controlled by the *nodecount* option.
|
||||
|
||||
The size of each node represents the flat weight of the node, and the width of
|
||||
each edge represents the cumulative weight of all samples going through
|
||||
it. Nodes are colored according to their cumulative weight, highlighting the
|
||||
paths with the highest cum weight.
|
||||
|
||||
* **-dot:** Generates a report in .dot format. All other formats are generated from
|
||||
this one.
|
||||
* **-svg:** Generates a report in SVG format.
|
||||
* **-web:** Generates a report in SVG format on a temp file, and starts a web
|
||||
browser to view it.
|
||||
* **-png, -jpg, -gif, -pdf:** Generates a report in these formats.
|
||||
|
||||
## Annotated code
|
||||
|
||||
pprof can also generate reports of annotated source with samples associated to
|
||||
them. For these, the source or binaries must be locally available, and the
|
||||
profile must contain data with the appropriate level of detail.
|
||||
|
||||
pprof will look for source files in its current working directory and all its
ancestors. pprof will look for binaries in the directories specified in the
`$PPROF_BINARY_PATH` environment variable, by default `$HOME/pprof/binaries`. It
will look up binaries by name, and if the profile includes linker build ids, it
will also search for them in a directory named after the build id.
|
||||
|
||||
pprof uses the binutils tools to examine and disassemble the binaries. By
|
||||
default it will search for those tools in the current path, but it can also
|
||||
search for them in a directory pointed to by the environment variable
|
||||
`$PPROF_TOOLS`.
|
||||
|
||||
* **-disasm= _regex_:** Generates an annotated disassembly listing for functions
matching *regex*.
* **-list= _regex_:** Generates an annotated source listing for functions
matching *regex*, with flat/cum weights for each source line.
|
||||
* **-weblist= _regex_:** Generates a source/assembly combined annotated listing for
|
||||
functions matching *regex*, and starts a web browser to display it.
|
||||
|
||||
# Fetching profiles
|
||||
|
||||
pprof can read profiles from a file or directly from a URL over http. Its native
|
||||
format is a gzipped profile.proto file, but it can also accept some legacy
|
||||
formats generated by [gperftools](https://github.com/gperftools/gperftools).
|
||||
|
||||
When fetching from a URL handler, pprof accepts options to indicate how much to
|
||||
wait for the profile.
|
||||
|
||||
* **-seconds= _int_:** Makes pprof request for a profile with the specified duration
|
||||
in seconds. Only makes sense for profiles based on elapsed time, such as CPU
|
||||
profiles.
|
||||
* **-timeout= _int_:** Makes pprof wait for the specified timeout when retrieving a
|
||||
profile over http. If not specified, pprof will use heuristics to determine a
|
||||
reasonable timeout.
|
||||
|
||||
If multiple profiles are specified, pprof will fetch them all and merge
|
||||
them. This is useful to combine profiles from multiple processes of a
|
||||
distributed job. The profiles may be from different programs but must be
|
||||
compatible (for example, CPU profiles cannot be combined with heap profiles).
|
||||
|
||||
pprof can subtract a profile from another in order to compare them. For that,
|
||||
use the **-base= _profile_** option, where *profile* is the filename or URL for the
|
||||
profile to be subtracted. This may result in some report entries having negative
values.
|
||||
|
||||
## Symbolization
|
||||
|
||||
pprof can add symbol information to a profile that was collected only with
|
||||
address information. This is useful for profiles for compiled languages, where
|
||||
it may not be easy or even possible for the profile source to include function
|
||||
names or source coordinates.
|
||||
|
||||
pprof can extract the symbol information locally by examining the binaries using
|
||||
the binutils tools, or it can ask running jobs that provide a symbolization
|
||||
interface.
|
||||
|
||||
pprof will attempt symbolizing profiles by default, and its `-symbolize` option
|
||||
provides some control over symbolization:
|
||||
|
||||
* **-symbolize=none:** Disables any symbolization from pprof.
|
||||
|
||||
* **-symbolize=local:** Only attempts symbolizing the profile from local
|
||||
binaries using the binutils tools.
|
||||
|
||||
* **-symbolize=remote:** Only attempts to symbolize running jobs by contacting
|
||||
their symbolization handler.
|
||||
|
||||
For local symbolization, pprof will look for the binaries on the paths specified
|
||||
by the profile, and then it will search for them on the path specified by the
|
||||
environment variable `$PPROF_BINARY_PATH`. Also, the name of the main binary can
|
||||
be passed directly to pprof as its first parameter, to override the name or
|
||||
location of the main binary of the profile, like this:
|
||||
|
||||
pprof /path/to/binary profile.pb.gz
|
||||
|
||||
By default pprof will attempt to demangle and simplify C++ names, to provide
|
||||
readable names for C++ symbols. It will aggressively discard template and
|
||||
function parameters. This can be controlled with the `-symbolize=demangle`
|
||||
option. Note that for remote symbolization mangled names may not be provided by
|
||||
the symbolization handler.
|
||||
|
||||
* **--symbolize=demangle=none:** Do not perform any demangling. Show mangled
|
||||
names if available.
|
||||
|
||||
* **-symbolize=demangle=full:** Demangle, but do not perform any
|
||||
simplification. Show full demangled names if available.
|
||||
|
||||
* **-symbolize=demangle=templates:** Demangle, and trim function parameters, but
|
||||
not template parameters.
|
||||
|
281
src/cmd/vendor/github.com/google/pprof/driver/driver.go
generated
vendored
Normal file
|
@ -0,0 +1,281 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package driver provides an external entry point to the pprof driver.
|
||||
package driver
|
||||
|
||||
import (
|
||||
"io"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
internaldriver "github.com/google/pprof/internal/driver"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
// PProf acquires a profile, and symbolizes it using a profile
|
||||
// manager. Then it generates a report formatted according to the
|
||||
// options selected through the flags package.
|
||||
func PProf(o *Options) error {
|
||||
return internaldriver.PProf(o.InternalOptions())
|
||||
}
|
||||
|
||||
func (o *Options) InternalOptions() *plugin.Options {
|
||||
var obj plugin.ObjTool
|
||||
if o.Obj != nil {
|
||||
obj = &internalObjTool{o.Obj}
|
||||
}
|
||||
var sym plugin.Symbolizer
|
||||
if o.Sym != nil {
|
||||
sym = &internalSymbolizer{o.Sym}
|
||||
}
|
||||
return &plugin.Options{
|
||||
o.Writer,
|
||||
o.Flagset,
|
||||
o.Fetch,
|
||||
sym,
|
||||
obj,
|
||||
o.UI,
|
||||
}
|
||||
}
|
||||
|
||||
// Options groups all the optional plugins into pprof.
|
||||
type Options struct {
|
||||
Writer Writer
|
||||
Flagset FlagSet
|
||||
Fetch Fetcher
|
||||
Sym Symbolizer
|
||||
Obj ObjTool
|
||||
UI UI
|
||||
}
|
||||
|
||||
// Writer provides a mechanism to write data under a certain name,
|
||||
// typically a filename.
|
||||
type Writer interface {
|
||||
Open(name string) (io.WriteCloser, error)
|
||||
}
|
||||
|
||||
// A FlagSet creates and parses command-line flags.
|
||||
// It is similar to the standard flag.FlagSet.
|
||||
type FlagSet interface {
|
||||
// Bool, Int, Float64, and String define new flags,
|
||||
// like the functions of the same name in package flag.
|
||||
Bool(name string, def bool, usage string) *bool
|
||||
Int(name string, def int, usage string) *int
|
||||
Float64(name string, def float64, usage string) *float64
|
||||
String(name string, def string, usage string) *string
|
||||
|
||||
// BoolVar, IntVar, Float64Var, and StringVar define new flags referencing
|
||||
// a given pointer, like the functions of the same name in package flag.
|
||||
BoolVar(pointer *bool, name string, def bool, usage string)
|
||||
IntVar(pointer *int, name string, def int, usage string)
|
||||
Float64Var(pointer *float64, name string, def float64, usage string)
|
||||
StringVar(pointer *string, name string, def string, usage string)
|
||||
|
||||
// StringList is similar to String but allows multiple values for a
|
||||
// single flag
|
||||
StringList(name string, def string, usage string) *[]*string
|
||||
|
||||
// ExtraUsage returns any additional text that should be
|
||||
// printed after the standard usage message.
|
||||
// The typical use of ExtraUsage is to show any custom flags
|
||||
// defined by the specific pprof plugins being used.
|
||||
ExtraUsage() string
|
||||
|
||||
// Parse initializes the flags with their values for this run
|
||||
// and returns the non-flag command line arguments.
|
||||
// If an unknown flag is encountered or there are no arguments,
|
||||
// Parse should call usage and return nil.
|
||||
Parse(usage func()) []string
|
||||
}
|
||||
|
||||
// A Fetcher reads and returns the profile named by src, using
|
||||
// the specified duration and timeout. It returns the fetched
|
||||
// profile and a string indicating a URL from where the profile
|
||||
// was fetched, which may be different than src.
|
||||
type Fetcher interface {
|
||||
Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error)
|
||||
}
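For example, a caller of this package can plug in its own Fetcher much as cmd/pprof does above. The following is a minimal, hypothetical sketch; fileFetcher and its behavior are assumptions, not part of this commit, and unset Options fields fall back to the driver's defaults:

```go
// Hypothetical sketch of a custom Fetcher wired into driver.PProf.
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/google/pprof/driver"
	"github.com/google/pprof/profile"
)

type fileFetcher struct{}

// Fetch reads src as a local profile file and returns src as its source string.
func (fileFetcher) Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error) {
	f, err := os.Open(src)
	if err != nil {
		return nil, "", err
	}
	defer f.Close()
	p, err := profile.Parse(f)
	return p, src, err
}

func main() {
	if err := driver.PProf(&driver.Options{Fetch: fileFetcher{}}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
}
```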
|
||||
|
||||
// A Symbolizer introduces symbol information into a profile.
|
||||
type Symbolizer interface {
|
||||
Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error
|
||||
}
|
||||
|
||||
// MappingSources map each profile.Mapping to the source of the profile.
|
||||
// The key is either Mapping.File or Mapping.BuildId.
|
||||
type MappingSources map[string][]struct {
|
||||
Source string // URL of the source the mapping was collected from
|
||||
Start uint64 // delta applied to addresses from this source (to represent Merge adjustments)
|
||||
}
|
||||
|
||||
// An ObjTool inspects shared libraries and executable files.
|
||||
type ObjTool interface {
|
||||
// Open opens the named object file. If the object is a shared
|
||||
// library, start/limit/offset are the addresses where it is mapped
|
||||
// into memory in the address space being inspected.
|
||||
Open(file string, start, limit, offset uint64) (ObjFile, error)
|
||||
|
||||
// Disasm disassembles the named object file, starting at
|
||||
// the start address and stopping at (before) the end address.
|
||||
Disasm(file string, start, end uint64) ([]Inst, error)
|
||||
}
|
||||
|
||||
// An Inst is a single instruction in an assembly listing.
|
||||
type Inst struct {
|
||||
Addr uint64 // virtual address of instruction
|
||||
Text string // instruction text
|
||||
Function string // function name
|
||||
File string // source file
|
||||
Line int // source line
|
||||
}
|
||||
|
||||
// An ObjFile is a single object file: a shared library or executable.
|
||||
type ObjFile interface {
|
||||
// Name returns the underlying file name, if available.
|
||||
Name() string
|
||||
|
||||
// Base returns the base address to use when looking up symbols in the file.
|
||||
Base() uint64
|
||||
|
||||
// BuildID returns the GNU build ID of the file, or an empty string.
|
||||
BuildID() string
|
||||
|
||||
// SourceLine reports the source line information for a given
|
||||
// address in the file. Due to inlining, the source line information
|
||||
// is in general a list of positions representing a call stack,
|
||||
// with the leaf function first.
|
||||
SourceLine(addr uint64) ([]Frame, error)
|
||||
|
||||
// Symbols returns a list of symbols in the object file.
|
||||
// If r is not nil, Symbols restricts the list to symbols
|
||||
// with names matching the regular expression.
|
||||
// If addr is not zero, Symbols restricts the list to symbols
|
||||
// containing that address.
|
||||
Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error)
|
||||
|
||||
// Close closes the file, releasing associated resources.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// A Frame describes a single line in a source file.
|
||||
type Frame struct {
|
||||
Func string // name of function
|
||||
File string // source file name
|
||||
Line int // line in file
|
||||
}
|
||||
|
||||
// A Sym describes a single symbol in an object file.
|
||||
type Sym struct {
|
||||
Name []string // names of symbol (many if symbol was dedup'ed)
|
||||
File string // object file containing symbol
|
||||
Start uint64 // start virtual address
|
||||
End uint64 // virtual address of last byte in sym (Start+size-1)
|
||||
}
|
||||
|
||||
// A UI manages user interactions.
|
||||
type UI interface {
|
||||
// Read returns a line of text (a command) read from the user.
|
||||
// prompt is printed before reading the command.
|
||||
ReadLine(prompt string) (string, error)
|
||||
|
||||
// Print shows a message to the user.
|
||||
// It formats the text as fmt.Print would and adds a final \n if not already present.
|
||||
// For line-based UI, Print writes to standard error.
|
||||
// (Standard output is reserved for report data.)
|
||||
Print(...interface{})
|
||||
|
||||
// PrintErr shows an error message to the user.
|
||||
// It formats the text as fmt.Print would and adds a final \n if not already present.
|
||||
// For line-based UI, PrintErr writes to standard error.
|
||||
PrintErr(...interface{})
|
||||
|
||||
// IsTerminal returns whether the UI is known to be tied to an
|
||||
// interactive terminal (as opposed to being redirected to a file).
|
||||
IsTerminal() bool
|
||||
|
||||
// SetAutoComplete instructs the UI to call complete(cmd) to obtain
|
||||
// the auto-completion of cmd, if the UI supports auto-completion at all.
|
||||
SetAutoComplete(complete func(string) string)
|
||||
}
|
||||
|
||||
// internalObjTool is a wrapper to map from the pprof external
|
||||
// interface to the internal interface.
|
||||
type internalObjTool struct {
|
||||
ObjTool
|
||||
}
|
||||
|
||||
func (o *internalObjTool) Open(file string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
f, err := o.ObjTool.Open(file, start, limit, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &internalObjFile{f}, err
|
||||
}
|
||||
|
||||
type internalObjFile struct {
|
||||
ObjFile
|
||||
}
|
||||
|
||||
func (f *internalObjFile) SourceLine(frame uint64) ([]plugin.Frame, error) {
|
||||
frames, err := f.ObjFile.SourceLine(frame)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pluginFrames []plugin.Frame
|
||||
for _, f := range frames {
|
||||
pluginFrames = append(pluginFrames, plugin.Frame(f))
|
||||
}
|
||||
return pluginFrames, nil
|
||||
}
|
||||
|
||||
func (f *internalObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
||||
syms, err := f.ObjFile.Symbols(r, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pluginSyms []*plugin.Sym
|
||||
for _, s := range syms {
|
||||
ps := plugin.Sym(*s)
|
||||
pluginSyms = append(pluginSyms, &ps)
|
||||
}
|
||||
return pluginSyms, nil
|
||||
}
|
||||
|
||||
func (o *internalObjTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
|
||||
insts, err := o.ObjTool.Disasm(file, start, end)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pluginInst []plugin.Inst
|
||||
for _, inst := range insts {
|
||||
pluginInst = append(pluginInst, plugin.Inst(inst))
|
||||
}
|
||||
return pluginInst, nil
|
||||
}
|
||||
|
||||
// internalSymbolizer is a wrapper to map from the pprof external
|
||||
// interface to the internal interface.
|
||||
type internalSymbolizer struct {
|
||||
Symbolizer
|
||||
}
|
||||
|
||||
func (s *internalSymbolizer) Symbolize(mode string, srcs plugin.MappingSources, prof *profile.Profile) error {
|
||||
isrcs := plugin.MappingSources{}
|
||||
for m, s := range srcs {
|
||||
isrcs[m] = s
|
||||
}
|
||||
return s.Symbolize(mode, isrcs, prof)
|
||||
}
|
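The UI contract above is small enough that a stub is easy to write. Below is a minimal standalone sketch (not part of the vendored sources, and deliberately not importing the internal plugin package) of a line-based implementation whose method set matches the UI interface; the name stdUI is made up.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// stdUI is a hypothetical line-based UI: Print and PrintErr both write to
// standard error so that standard output stays free for report data,
// matching the contract documented on the UI interface.
type stdUI struct {
	r *bufio.Reader
}

func (u *stdUI) ReadLine(prompt string) (string, error) {
	fmt.Fprint(os.Stderr, prompt)
	return u.r.ReadString('\n')
}

func (u *stdUI) Print(args ...interface{})    { u.emit(fmt.Sprint(args...)) }
func (u *stdUI) PrintErr(args ...interface{}) { u.emit(fmt.Sprint(args...)) }

// emit adds a trailing newline if the message does not already end in one.
func (u *stdUI) emit(msg string) {
	if !strings.HasSuffix(msg, "\n") {
		msg += "\n"
	}
	fmt.Fprint(os.Stderr, msg)
}

func (u *stdUI) IsTerminal() bool                    { return false }
func (u *stdUI) SetAutoComplete(func(string) string) {}

func main() {
	u := &stdUI{r: bufio.NewReader(os.Stdin)}
	u.Print("ready")
}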
219 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go (generated, vendored, new file)
@@ -0,0 +1,219 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultAddr2line = "addr2line"
|
||||
|
||||
// addr2line may produce multiple lines of output. We
|
||||
// use this sentinel to identify the end of the output.
|
||||
sentinel = ^uint64(0)
|
||||
)
|
||||
|
||||
// addr2Liner is a connection to an addr2line command for obtaining
|
||||
// address and line number information from a binary.
|
||||
type addr2Liner struct {
|
||||
rw lineReaderWriter
|
||||
base uint64
|
||||
|
||||
// nm holds an NM based addr2Liner which can provide
|
||||
// better full names compared to addr2line, which often drops
|
||||
// namespaces etc. from the names it returns.
|
||||
nm *addr2LinerNM
|
||||
}
|
||||
|
||||
// lineReaderWriter is an interface to abstract the I/O to an addr2line
|
||||
// process. It writes a line of input to the job, and reads its output
|
||||
// one line at a time.
|
||||
type lineReaderWriter interface {
|
||||
write(string) error
|
||||
readLine() (string, error)
|
||||
close()
|
||||
}
|
||||
|
||||
type addr2LinerJob struct {
|
||||
cmd *exec.Cmd
|
||||
in io.WriteCloser
|
||||
out *bufio.Reader
|
||||
}
|
||||
|
||||
func (a *addr2LinerJob) write(s string) error {
|
||||
_, err := fmt.Fprint(a.in, s+"\n")
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *addr2LinerJob) readLine() (string, error) {
|
||||
return a.out.ReadString('\n')
|
||||
}
|
||||
|
||||
// close releases any resources used by the addr2liner object.
|
||||
func (a *addr2LinerJob) close() {
|
||||
a.in.Close()
|
||||
a.cmd.Wait()
|
||||
}
|
||||
|
||||
// newAddr2liner starts the given addr2liner command reporting
|
||||
// information about the given executable file. If file is a shared
|
||||
// library, base should be the address at which it was mapped in the
|
||||
// program under consideration.
|
||||
func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultAddr2line
|
||||
}
|
||||
|
||||
j := &addr2LinerJob{
|
||||
cmd: exec.Command(cmd, "-aif", "-e", file),
|
||||
}
|
||||
|
||||
var err error
|
||||
if j.in, err = j.cmd.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outPipe, err := j.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
j.out = bufio.NewReader(outPipe)
|
||||
if err := j.cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a := &addr2Liner{
|
||||
rw: j,
|
||||
base: base,
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (d *addr2Liner) readString() (string, error) {
|
||||
s, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(s), nil
|
||||
}
|
||||
|
||||
// readFrame parses the addr2line output for a single address. It
|
||||
// returns a populated plugin.Frame and whether it has reached the end of the
|
||||
// data.
|
||||
func (d *addr2Liner) readFrame() (plugin.Frame, bool) {
|
||||
funcname, err := d.readString()
|
||||
if err != nil {
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
if strings.HasPrefix(funcname, "0x") {
|
||||
// If addr2line returns a hex address we can assume it is the
|
||||
// sentinel. Read and ignore next two lines of output from
|
||||
// addr2line
|
||||
d.readString()
|
||||
d.readString()
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
|
||||
fileline, err := d.readString()
|
||||
if err != nil {
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
|
||||
linenumber := 0
|
||||
|
||||
if funcname == "??" {
|
||||
funcname = ""
|
||||
}
|
||||
|
||||
if fileline == "??:0" {
|
||||
fileline = ""
|
||||
} else {
|
||||
if i := strings.LastIndex(fileline, ":"); i >= 0 {
|
||||
// Remove discriminator, if present
|
||||
if disc := strings.Index(fileline, " (discriminator"); disc > 0 {
|
||||
fileline = fileline[:disc]
|
||||
}
|
||||
// If we cannot parse a number after the last ":", keep it as
|
||||
// part of the filename.
|
||||
if line, err := strconv.Atoi(fileline[i+1:]); err == nil {
|
||||
linenumber = line
|
||||
fileline = fileline[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return plugin.Frame{funcname, fileline, linenumber}, false
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (d *addr2Liner) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
if err := d.rw.write(fmt.Sprintf("%x", addr-d.base)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.rw.write(fmt.Sprintf("%x", sentinel)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := d.readString()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(resp, "0x") {
|
||||
return nil, fmt.Errorf("unexpected addr2line output: %s", resp)
|
||||
}
|
||||
|
||||
var stack []plugin.Frame
|
||||
for {
|
||||
frame, end := d.readFrame()
|
||||
if end {
|
||||
break
|
||||
}
|
||||
|
||||
if frame != (plugin.Frame{}) {
|
||||
stack = append(stack, frame)
|
||||
}
|
||||
}
|
||||
|
||||
// Get better name from nm if possible.
|
||||
if len(stack) > 0 && d.nm != nil {
|
||||
nm, err := d.nm.addrInfo(addr)
|
||||
if err == nil && len(nm) > 0 {
|
||||
// Last entry in frame list should match since
|
||||
// it is non-inlined. As a simple heuristic,
|
||||
// we only switch to the nm-based name if it
|
||||
// is longer.
|
||||
nmName := nm[len(nm)-1].Func
|
||||
a2lName := stack[len(stack)-1].Func
|
||||
if len(nmName) > len(a2lName) {
|
||||
stack[len(stack)-1].Func = nmName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stack, nil
|
||||
}
|
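As a reading aid (not part of the vendored sources), here is a standalone sketch of the file:line handling that readFrame performs on addr2line output, including the " (discriminator N)" suffix; the sample input string is made up and the helper name splitFileLine is hypothetical.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitFileLine mirrors the addr2line fileline handling above: treat "??:0"
// as unknown, strip an optional discriminator, then split off a trailing
// ":<line>" only if it parses as a number.
func splitFileLine(fileline string) (file string, line int) {
	if fileline == "??:0" {
		return "", 0
	}
	if disc := strings.Index(fileline, " (discriminator"); disc > 0 {
		fileline = fileline[:disc]
	}
	if i := strings.LastIndex(fileline, ":"); i >= 0 {
		if n, err := strconv.Atoi(fileline[i+1:]); err == nil {
			return fileline[:i], n
		}
	}
	return fileline, 0
}

func main() {
	file, line := splitFileLine("foo.cc:42 (discriminator 3)") // made-up sample
	fmt.Println(file, line)                                    // foo.cc 42
}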
170 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go (generated, vendored, new file)
@@ -0,0 +1,170 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLLVMSymbolizer = "llvm-symbolizer"
|
||||
)
|
||||
|
||||
// llvmSymbolizer is a connection to an llvm-symbolizer command for
|
||||
// obtaining address and line number information from a binary.
|
||||
type llvmSymbolizer struct {
|
||||
filename string
|
||||
rw lineReaderWriter
|
||||
base uint64
|
||||
}
|
||||
|
||||
type llvmSymbolizerJob struct {
|
||||
cmd *exec.Cmd
|
||||
in io.WriteCloser
|
||||
out *bufio.Reader
|
||||
}
|
||||
|
||||
func (a *llvmSymbolizerJob) write(s string) error {
|
||||
_, err := fmt.Fprint(a.in, s+"\n")
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *llvmSymbolizerJob) readLine() (string, error) {
|
||||
return a.out.ReadString('\n')
|
||||
}
|
||||
|
||||
// close releases any resources used by the llvmSymbolizer object.
|
||||
func (a *llvmSymbolizerJob) close() {
|
||||
a.in.Close()
|
||||
a.cmd.Wait()
|
||||
}
|
||||
|
||||
// newLlvmSymbolizer starts the given llvmSymbolizer command reporting
|
||||
// information about the given executable file. If file is a shared
|
||||
// library, base should be the address at which it was mapped in the
|
||||
// program under consideration.
|
||||
func newLLVMSymbolizer(cmd, file string, base uint64) (*llvmSymbolizer, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultLLVMSymbolizer
|
||||
}
|
||||
|
||||
j := &llvmSymbolizerJob{
|
||||
cmd: exec.Command(cmd, "-inlining", "-demangle=false"),
|
||||
}
|
||||
|
||||
var err error
|
||||
if j.in, err = j.cmd.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outPipe, err := j.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
j.out = bufio.NewReader(outPipe)
|
||||
if err := j.cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a := &llvmSymbolizer{
|
||||
filename: file,
|
||||
rw: j,
|
||||
base: base,
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (d *llvmSymbolizer) readString() (string, error) {
|
||||
s, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(s), nil
|
||||
}
|
||||
|
||||
// readFrame parses the llvm-symbolizer output for a single address. It
|
||||
// returns a populated plugin.Frame and whether it has reached the end of the
|
||||
// data.
|
||||
func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) {
|
||||
funcname, err := d.readString()
|
||||
if err != nil {
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
|
||||
switch funcname {
|
||||
case "":
|
||||
return plugin.Frame{}, true
|
||||
case "??":
|
||||
funcname = ""
|
||||
}
|
||||
|
||||
fileline, err := d.readString()
|
||||
if err != nil {
|
||||
return plugin.Frame{funcname, "", 0}, true
|
||||
}
|
||||
|
||||
linenumber := 0
|
||||
if fileline == "??:0" {
|
||||
fileline = ""
|
||||
} else {
|
||||
switch split := strings.Split(fileline, ":"); len(split) {
|
||||
case 1:
|
||||
// filename
|
||||
fileline = split[0]
|
||||
case 2, 3:
|
||||
// filename:line , or
|
||||
// filename:line:disc , or
|
||||
fileline = split[0]
|
||||
if line, err := strconv.Atoi(split[1]); err == nil {
|
||||
linenumber = line
|
||||
}
|
||||
default:
|
||||
// Unrecognized, ignore
|
||||
}
|
||||
}
|
||||
|
||||
return plugin.Frame{funcname, fileline, linenumber}, false
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (d *llvmSymbolizer) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
if err := d.rw.write(fmt.Sprintf("%s 0x%x", d.filename, addr-d.base)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var stack []plugin.Frame
|
||||
for {
|
||||
frame, end := d.readFrame()
|
||||
if end {
|
||||
break
|
||||
}
|
||||
|
||||
if frame != (plugin.Frame{}) {
|
||||
stack = append(stack, frame)
|
||||
}
|
||||
}
|
||||
|
||||
return stack, nil
|
||||
}
|
123 src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go (generated, vendored, new file)
@@ -0,0 +1,123 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultNM = "nm"
|
||||
)
|
||||
|
||||
// addr2LinerNM is a connection to an nm command for obtaining address
|
||||
// information from a binary.
|
||||
type addr2LinerNM struct {
|
||||
m []symbolInfo // Sorted list of addresses from binary.
|
||||
}
|
||||
|
||||
type symbolInfo struct {
|
||||
address uint64
|
||||
name string
|
||||
}
|
||||
|
||||
// newAddr2LinerNM starts the given nm command reporting information about the
|
||||
// given executable file. If file is a shared library, base should be
|
||||
// the address at which it was mapped in the program under
|
||||
// consideration.
|
||||
func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultNM
|
||||
}
|
||||
|
||||
a := &addr2LinerNM{
|
||||
m: []symbolInfo{},
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
c := exec.Command(cmd, "-n", file)
|
||||
c.Stdout = &b
|
||||
|
||||
if err := c.Run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse nm output and populate symbol map.
|
||||
// Skip lines we fail to parse.
|
||||
buf := bufio.NewReader(&b)
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if line == "" && err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
fields := strings.SplitN(line, " ", 3)
|
||||
if len(fields) != 3 {
|
||||
continue
|
||||
}
|
||||
address, err := strconv.ParseUint(fields[0], 16, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
a.m = append(a.m, symbolInfo{
|
||||
address: address + base,
|
||||
name: fields[2],
|
||||
})
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
if len(a.m) == 0 || addr < a.m[0].address || addr > a.m[len(a.m)-1].address {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Binary search. Search until low, high are separated by 1.
|
||||
low, high := 0, len(a.m)
|
||||
for low+1 < high {
|
||||
mid := (low + high) / 2
|
||||
v := a.m[mid].address
|
||||
if addr == v {
|
||||
low = mid
|
||||
break
|
||||
} else if addr > v {
|
||||
low = mid
|
||||
} else {
|
||||
high = mid
|
||||
}
|
||||
}
|
||||
|
||||
// Address is between a.m[low] and a.m[high].
|
||||
// Pick low, as it represents [low, high).
|
||||
f := []plugin.Frame{
|
||||
{
|
||||
Func: a.m[low].name,
|
||||
},
|
||||
}
|
||||
return f, nil
|
||||
}
|
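The nm-based liner above keeps the output of "nm -n" as an address-sorted table and resolves an address to the symbol whose start is the closest one at or below it. The following standalone sketch (not part of the vendored sources) shows the same lookup using sort.Search instead of the hand-written binary search; the table contents are made up.

package main

import (
	"fmt"
	"sort"
)

type sym struct {
	addr uint64
	name string
}

// lookup returns the name of the symbol covering addr, or "" if addr lies
// outside the range spanned by the sorted table.
func lookup(table []sym, addr uint64) string {
	if len(table) == 0 || addr < table[0].addr || addr > table[len(table)-1].addr {
		return ""
	}
	// First index whose address is strictly greater than addr; the entry
	// just before it is the covering symbol.
	i := sort.Search(len(table), func(i int) bool { return table[i].addr > addr })
	return table[i-1].name
}

func main() {
	table := []sym{{0x1000, "start"}, {0x2000, "compute"}, {0x3000, "finish"}} // made-up sample
	fmt.Println(lookup(table, 0x2fff))                                         // compute
}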
305 src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go (generated, vendored, new file)
@@ -0,0 +1,305 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package binutils provides access to the GNU binutils.
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"debug/macho"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/elfexec"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
// A Binutils implements plugin.ObjTool by invoking the GNU binutils.
|
||||
// SetConfig must be called before any of the other methods.
|
||||
type Binutils struct {
|
||||
// Commands to invoke.
|
||||
llvmSymbolizer string
|
||||
llvmSymbolizerFound bool
|
||||
addr2line string
|
||||
addr2lineFound bool
|
||||
nm string
|
||||
nmFound bool
|
||||
objdump string
|
||||
objdumpFound bool
|
||||
|
||||
// if fast, perform symbolization using nm (symbol names only),
|
||||
// instead of file-line detail from the slower addr2line.
|
||||
fast bool
|
||||
}
|
||||
|
||||
// SetFastSymbolization sets a toggle that makes binutils use fast
|
||||
// symbolization (using nm), which is much faster than addr2line but
|
||||
// provides only symbol name information (no file/line).
|
||||
func (b *Binutils) SetFastSymbolization(fast bool) {
|
||||
b.fast = fast
|
||||
}
|
||||
|
||||
// SetTools processes the contents of the tools option. It
|
||||
// expects a set of entries separated by commas; each entry is a pair
|
||||
// of the form t:path, where cmd will be used to look only for the
|
||||
// tool named t. If t is not specified, the path is searched for all
|
||||
// tools.
|
||||
func (b *Binutils) SetTools(config string) {
|
||||
// paths collect paths per tool; Key "" contains the default.
|
||||
paths := make(map[string][]string)
|
||||
for _, t := range strings.Split(config, ",") {
|
||||
name, path := "", t
|
||||
if ct := strings.SplitN(t, ":", 2); len(ct) == 2 {
|
||||
name, path = ct[0], ct[1]
|
||||
}
|
||||
paths[name] = append(paths[name], path)
|
||||
}
|
||||
|
||||
defaultPath := paths[""]
|
||||
b.llvmSymbolizer, b.llvmSymbolizerFound = findExe("llvm-symbolizer", append(paths["llvm-symbolizer"], defaultPath...))
|
||||
b.addr2line, b.addr2lineFound = findExe("addr2line", append(paths["addr2line"], defaultPath...))
|
||||
b.nm, b.nmFound = findExe("nm", append(paths["nm"], defaultPath...))
|
||||
b.objdump, b.objdumpFound = findExe("objdump", append(paths["objdump"], defaultPath...))
|
||||
}
|
||||
|
||||
// findExe looks for an executable command on a set of paths.
|
||||
// If it cannot find it, returns cmd.
|
||||
func findExe(cmd string, paths []string) (string, bool) {
|
||||
for _, p := range paths {
|
||||
cp := filepath.Join(p, cmd)
|
||||
if c, err := exec.LookPath(cp); err == nil {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
return cmd, false
|
||||
}
|
||||
|
||||
// Disasm returns the assembly instructions for the specified address range
|
||||
// of a binary.
|
||||
func (b *Binutils) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
|
||||
if b.addr2line == "" {
|
||||
// Update the command invocations if not initialized.
|
||||
b.SetTools("")
|
||||
}
|
||||
cmd := exec.Command(b.objdump, "-d", "-C", "--no-show-raw-insn", "-l",
|
||||
fmt.Sprintf("--start-address=%#x", start),
|
||||
fmt.Sprintf("--stop-address=%#x", end),
|
||||
file)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%v: %v", cmd.Args, err)
|
||||
}
|
||||
|
||||
return disassemble(out)
|
||||
}
|
||||
|
||||
// Open satisfies the plugin.ObjTool interface.
|
||||
func (b *Binutils) Open(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
if b.addr2line == "" {
|
||||
// Update the command invocations if not initialized.
|
||||
b.SetTools("")
|
||||
}
|
||||
|
||||
// Make sure file is a supported executable.
|
||||
// The pprof driver uses Open to sniff the difference
|
||||
// between an executable and a profile.
|
||||
// For now, only ELF is supported.
|
||||
// Could read the first few bytes of the file and
|
||||
// use a table of prefixes if we need to support other
|
||||
// systems at some point.
|
||||
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
// For testing, do not require file name to exist.
|
||||
if strings.Contains(b.addr2line, "testdata/") {
|
||||
return &fileAddr2Line{file: file{b: b, name: name}}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if f, err := b.openELF(name, start, limit, offset); err == nil {
|
||||
return f, nil
|
||||
}
|
||||
if f, err := b.openMachO(name, start, limit, offset); err == nil {
|
||||
return f, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unrecognized binary: %s", name)
|
||||
}
|
||||
|
||||
func (b *Binutils) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
of, err := macho.Open(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parsing %s: %v", name, err)
|
||||
}
|
||||
defer of.Close()
|
||||
|
||||
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
|
||||
return &fileNM{file: file{b: b, name: name}}, nil
|
||||
}
|
||||
return &fileAddr2Line{file: file{b: b, name: name}}, nil
|
||||
}
|
||||
|
||||
func (b *Binutils) openELF(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
ef, err := elf.Open(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Parsing %s: %v", name, err)
|
||||
}
|
||||
defer ef.Close()
|
||||
|
||||
var stextOffset *uint64
|
||||
var pageAligned = func(addr uint64) bool { return addr%4096 == 0 }
|
||||
if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) {
|
||||
// Reading all Symbols is expensive, and we only rarely need it so
|
||||
// we don't want to do it every time. But if _stext happens to be
|
||||
// page-aligned but isn't the same as Vaddr, we would symbolize
|
||||
// wrong. So if the name the addresses aren't page aligned, or if
|
||||
// the name is "vmlinux" we read _stext. We can be wrong if: (1)
|
||||
// someone passes a kernel path that doesn't contain "vmlinux" AND
|
||||
// (2) _stext is page-aligned AND (3) _stext is not at Vaddr
|
||||
symbols, err := ef.Symbols()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range symbols {
|
||||
if s.Name == "_stext" {
|
||||
// The kernel may use _stext as the mapping start address.
|
||||
stextOffset = &s.Value
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
base, err := elfexec.GetBase(&ef.FileHeader, nil, stextOffset, start, limit, offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not identify base for %s: %v", name, err)
|
||||
}
|
||||
|
||||
buildID := ""
|
||||
if f, err := os.Open(name); err == nil {
|
||||
if id, err := elfexec.GetBuildID(f); err == nil {
|
||||
buildID = fmt.Sprintf("%x", id)
|
||||
}
|
||||
}
|
||||
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
|
||||
return &fileNM{file: file{b, name, base, buildID}}, nil
|
||||
}
|
||||
return &fileAddr2Line{file: file{b, name, base, buildID}}, nil
|
||||
}
|
||||
|
||||
// file implements the binutils.ObjFile interface.
|
||||
type file struct {
|
||||
b *Binutils
|
||||
name string
|
||||
base uint64
|
||||
buildID string
|
||||
}
|
||||
|
||||
func (f *file) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *file) Base() uint64 {
|
||||
return f.base
|
||||
}
|
||||
|
||||
func (f *file) BuildID() string {
|
||||
return f.buildID
|
||||
}
|
||||
|
||||
func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
return []plugin.Frame{}, nil
|
||||
}
|
||||
|
||||
func (f *file) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
||||
// Get from nm a list of symbols sorted by address.
|
||||
cmd := exec.Command(f.b.nm, "-n", f.name)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%v: %v", cmd.Args, err)
|
||||
}
|
||||
|
||||
return findSymbols(out, f.name, r, addr)
|
||||
}
|
||||
|
||||
// fileNM implements the binutils.ObjFile interface, using 'nm' to map
|
||||
// addresses to symbols (without file/line number information). It is
|
||||
// faster than fileAddr2Line.
|
||||
type fileNM struct {
|
||||
file
|
||||
addr2linernm *addr2LinerNM
|
||||
}
|
||||
|
||||
func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
if f.addr2linernm == nil {
|
||||
addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.addr2linernm = addr2liner
|
||||
}
|
||||
return f.addr2linernm.addrInfo(addr)
|
||||
}
|
||||
|
||||
// fileAddr2Line implements the binutils.ObjFile interface, using
|
||||
// 'addr2line' to map addresses to symbols (with file/line number
|
||||
// information). It can be slow for large binaries with debug
|
||||
// information.
|
||||
type fileAddr2Line struct {
|
||||
file
|
||||
addr2liner *addr2Liner
|
||||
llvmSymbolizer *llvmSymbolizer
|
||||
}
|
||||
|
||||
func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
if f.llvmSymbolizer != nil {
|
||||
return f.llvmSymbolizer.addrInfo(addr)
|
||||
}
|
||||
if f.addr2liner != nil {
|
||||
return f.addr2liner.addrInfo(addr)
|
||||
}
|
||||
|
||||
if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base); err == nil {
|
||||
f.llvmSymbolizer = llvmSymbolizer
|
||||
return f.llvmSymbolizer.addrInfo(addr)
|
||||
}
|
||||
|
||||
if addr2liner, err := newAddr2Liner(f.b.addr2line, f.name, f.base); err == nil {
|
||||
f.addr2liner = addr2liner
|
||||
|
||||
// When addr2line encounters some gcc compiled binaries, it
|
||||
// drops interesting parts of names in anonymous namespaces.
|
||||
// Fallback to NM for better function names.
|
||||
if nm, err := newAddr2LinerNM(f.b.nm, f.name, f.base); err == nil {
|
||||
f.addr2liner.nm = nm
|
||||
}
|
||||
return f.addr2liner.addrInfo(addr)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("could not find local addr2liner")
|
||||
}
|
||||
|
||||
func (f *fileAddr2Line) Close() error {
|
||||
if f.addr2liner != nil {
|
||||
f.addr2liner.rw.close()
|
||||
f.addr2liner = nil
|
||||
}
|
||||
return nil
|
||||
}
|
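SetTools above accepts the -tools option as a comma-separated list of entries, each either a bare search path (applied to every tool) or a "tool:path" pair that applies only to the named tool. A standalone sketch of that parsing (not part of the vendored sources; the sample string and the name parseTools are made up) follows.

package main

import (
	"fmt"
	"strings"
)

// parseTools splits a -tools configuration string into per-tool search
// paths; the empty key holds the default path used for all tools.
func parseTools(config string) map[string][]string {
	paths := make(map[string][]string)
	for _, t := range strings.Split(config, ",") {
		name, path := "", t
		if ct := strings.SplitN(t, ":", 2); len(ct) == 2 {
			name, path = ct[0], ct[1]
		}
		paths[name] = append(paths[name], path)
	}
	return paths
}

func main() {
	fmt.Println(parseTools("addr2line:/opt/cross/bin,/usr/local/bin"))
	// e.g. map[:[/usr/local/bin] addr2line:[/opt/cross/bin]]
}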
152 src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils_test.go (generated, vendored, new file)
@@ -0,0 +1,152 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
var testAddrMap = map[int]string{
|
||||
1000: "_Z3fooid.clone2",
|
||||
2000: "_ZNSaIiEC1Ev.clone18",
|
||||
3000: "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm",
|
||||
}
|
||||
|
||||
func functionName(level int) (name string) {
|
||||
if name = testAddrMap[level]; name != "" {
|
||||
return name
|
||||
}
|
||||
return fmt.Sprintf("fun%d", level)
|
||||
}
|
||||
|
||||
func TestAddr2Liner(t *testing.T) {
|
||||
const offset = 0x500
|
||||
|
||||
a := addr2Liner{&mockAddr2liner{}, offset, nil}
|
||||
for i := 1; i < 8; i++ {
|
||||
addr := i*0x1000 + offset
|
||||
s, err := a.addrInfo(uint64(addr))
|
||||
if err != nil {
|
||||
t.Fatalf("addrInfo(%#x): %v", addr, err)
|
||||
}
|
||||
if len(s) != i {
|
||||
t.Fatalf("addrInfo(%#x): got len==%d, want %d", addr, len(s), i)
|
||||
}
|
||||
for l, f := range s {
|
||||
level := (len(s) - l) * 1000
|
||||
want := plugin.Frame{functionName(level), fmt.Sprintf("file%d", level), level}
|
||||
|
||||
if f != want {
|
||||
t.Errorf("AddrInfo(%#x)[%d]: = %+v, want %+v", addr, l, f, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
s, err := a.addrInfo(0xFFFF)
|
||||
if err != nil {
|
||||
t.Fatalf("addrInfo(0xFFFF): %v", err)
|
||||
}
|
||||
if len(s) != 0 {
|
||||
t.Fatalf("AddrInfo(0xFFFF): got len==%d, want 0", len(s))
|
||||
}
|
||||
a.rw.close()
|
||||
}
|
||||
|
||||
type mockAddr2liner struct {
|
||||
output []string
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) write(s string) error {
|
||||
var lines []string
|
||||
switch s {
|
||||
case "1000":
|
||||
lines = []string{"_Z3fooid.clone2", "file1000:1000"}
|
||||
case "2000":
|
||||
lines = []string{"_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "3000":
|
||||
lines = []string{"_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "4000":
|
||||
lines = []string{"fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "5000":
|
||||
lines = []string{"fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "6000":
|
||||
lines = []string{"fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "7000":
|
||||
lines = []string{"fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "8000":
|
||||
lines = []string{"fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "9000":
|
||||
lines = []string{"fun9000", "file9000:9000", "fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
default:
|
||||
lines = []string{"??", "??:0"}
|
||||
}
|
||||
a.output = append(a.output, "0x"+s)
|
||||
a.output = append(a.output, lines...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) readLine() (string, error) {
|
||||
if len(a.output) == 0 {
|
||||
return "", fmt.Errorf("end of file")
|
||||
}
|
||||
next := a.output[0]
|
||||
a.output = a.output[1:]
|
||||
return next, nil
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) close() {
|
||||
}
|
||||
|
||||
func TestAddr2LinerLookup(t *testing.T) {
|
||||
oddSizedMap := addr2LinerNM{
|
||||
m: []symbolInfo{
|
||||
{0x1000, "0x1000"},
|
||||
{0x2000, "0x2000"},
|
||||
{0x3000, "0x3000"},
|
||||
},
|
||||
}
|
||||
evenSizedMap := addr2LinerNM{
|
||||
m: []symbolInfo{
|
||||
{0x1000, "0x1000"},
|
||||
{0x2000, "0x2000"},
|
||||
{0x3000, "0x3000"},
|
||||
{0x4000, "0x4000"},
|
||||
},
|
||||
}
|
||||
for _, a := range []*addr2LinerNM{
|
||||
&oddSizedMap, &evenSizedMap,
|
||||
} {
|
||||
for address, want := range map[uint64]string{
|
||||
0x1000: "0x1000",
|
||||
0x1001: "0x1000",
|
||||
0x1FFF: "0x1000",
|
||||
0x2000: "0x2000",
|
||||
0x2001: "0x2000",
|
||||
} {
|
||||
if got, _ := a.addrInfo(address); !checkAddress(got, address, want) {
|
||||
t.Errorf("%x: got %v, want %s", address, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkAddress(got []plugin.Frame, address uint64, want string) bool {
|
||||
if len(got) != 1 {
|
||||
return false
|
||||
}
|
||||
return got[0].Func == want
|
||||
}
|
147 src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go (generated, vendored, new file)
@@ -0,0 +1,147 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/ianlancetaylor/demangle"
|
||||
)
|
||||
|
||||
var (
|
||||
nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`)
|
||||
objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`)
|
||||
objdumpOutputFileLine = regexp.MustCompile(`^(.*):([0-9]+)`)
|
||||
objdumpOutputFunction = regexp.MustCompile(`^(\S.*)\(\):`)
|
||||
)
|
||||
|
||||
func findSymbols(syms []byte, file string, r *regexp.Regexp, address uint64) ([]*plugin.Sym, error) {
|
||||
// Collect all symbols from the nm output, grouping names mapped to
|
||||
// the same address into a single symbol.
|
||||
var symbols []*plugin.Sym
|
||||
names, start := []string{}, uint64(0)
|
||||
buf := bytes.NewBuffer(syms)
|
||||
for symAddr, name, err := nextSymbol(buf); err == nil; symAddr, name, err = nextSymbol(buf) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if start == symAddr {
|
||||
names = append(names, name)
|
||||
continue
|
||||
}
|
||||
if match := matchSymbol(names, start, symAddr-1, r, address); match != nil {
|
||||
symbols = append(symbols, &plugin.Sym{match, file, start, symAddr - 1})
|
||||
}
|
||||
names, start = []string{name}, symAddr
|
||||
}
|
||||
|
||||
return symbols, nil
|
||||
}
|
||||
|
||||
// matchSymbol checks if a symbol is to be selected by checking its
|
||||
// name to the regexp and optionally its address. It returns the name(s)
|
||||
// to be used for the matched symbol, or nil if no match
|
||||
func matchSymbol(names []string, start, end uint64, r *regexp.Regexp, address uint64) []string {
|
||||
if address != 0 && address >= start && address <= end {
|
||||
return names
|
||||
}
|
||||
for _, name := range names {
|
||||
if r.MatchString(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Match all possible demangled versions of the name.
|
||||
for _, o := range [][]demangle.Option{
|
||||
{demangle.NoClones},
|
||||
{demangle.NoParams},
|
||||
{demangle.NoParams, demangle.NoTemplateParams},
|
||||
} {
|
||||
if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) {
|
||||
return []string{demangled}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// disassemble parses the output of the objdump command and returns
|
||||
// the assembly instructions in a slice.
|
||||
func disassemble(asm []byte) ([]plugin.Inst, error) {
|
||||
buf := bytes.NewBuffer(asm)
|
||||
function, file, line := "", "", 0
|
||||
var assembly []plugin.Inst
|
||||
for {
|
||||
input, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
if input == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 {
|
||||
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
|
||||
assembly = append(assembly,
|
||||
plugin.Inst{
|
||||
Addr: address,
|
||||
Text: fields[2],
|
||||
Function: function,
|
||||
File: file,
|
||||
Line: line,
|
||||
})
|
||||
continue
|
||||
}
|
||||
}
|
||||
if fields := objdumpOutputFileLine.FindStringSubmatch(input); len(fields) == 3 {
|
||||
if l, err := strconv.ParseUint(fields[2], 10, 32); err == nil {
|
||||
file, line = fields[1], int(l)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 {
|
||||
function = fields[1]
|
||||
continue
|
||||
}
|
||||
// Reset on unrecognized lines.
|
||||
function, file, line = "", "", 0
|
||||
}
|
||||
|
||||
return assembly, nil
|
||||
}
|
||||
|
||||
// nextSymbol parses the nm output to find the next symbol listed.
|
||||
// Skips over any output it cannot recognize.
|
||||
func nextSymbol(buf *bytes.Buffer) (uint64, string, error) {
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF || line == "" {
|
||||
return 0, "", err
|
||||
}
|
||||
}
|
||||
|
||||
if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 {
|
||||
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
|
||||
return address, fields[3], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
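nextSymbol above relies on nmOutputRE to pull the address, type character, and name out of each "nm -n" line. The following standalone sketch (not part of the vendored sources; the sample nm line is made up) shows how the capture groups map onto such a line.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same expression as nmOutputRE above: hex address, one-character symbol
// type, then the symbol name.
var nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`)

func main() {
	line := "0000000000401000 T main.compute" // made-up sample nm output
	if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 {
		addr, _ := strconv.ParseUint(fields[1], 16, 64)
		fmt.Printf("addr=%#x type=%s name=%s\n", addr, fields[2], fields[3])
		// addr=0x401000 type=T name=main.compute
	}
}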
154 src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm_test.go (generated, vendored, new file)
@@ -0,0 +1,154 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
// TestFindSymbols tests the FindSymbols routine using a hardcoded nm output.
|
||||
func TestFindSymbols(t *testing.T) {
|
||||
type testcase struct {
|
||||
query, syms string
|
||||
want []plugin.Sym
|
||||
}
|
||||
|
||||
testsyms := `0000000000001000 t lineA001
|
||||
0000000000001000 t lineA002
|
||||
0000000000001000 t line1000
|
||||
0000000000002000 t line200A
|
||||
0000000000002000 t line2000
|
||||
0000000000002000 t line200B
|
||||
0000000000003000 t line3000
|
||||
0000000000003000 t _ZNK4DumbclEPKc
|
||||
0000000000003000 t lineB00C
|
||||
0000000000003000 t line300D
|
||||
0000000000004000 t _the_end
|
||||
`
|
||||
testcases := []testcase{
|
||||
{
|
||||
"line.*[AC]",
|
||||
testsyms,
|
||||
[]plugin.Sym{
|
||||
{[]string{"lineA001"}, "object.o", 0x1000, 0x1FFF},
|
||||
{[]string{"line200A"}, "object.o", 0x2000, 0x2FFF},
|
||||
{[]string{"lineB00C"}, "object.o", 0x3000, 0x3FFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
"Dumb::operator",
|
||||
testsyms,
|
||||
[]plugin.Sym{
|
||||
{[]string{"Dumb::operator()(char const*) const"}, "object.o", 0x3000, 0x3FFF},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
syms, err := findSymbols([]byte(tc.syms), "object.o", regexp.MustCompile(tc.query), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("%q: findSymbols: %v", tc.query, err)
|
||||
}
|
||||
if err := checkSymbol(syms, tc.want); err != nil {
|
||||
t.Errorf("%q: %v", tc.query, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkSymbol(got []*plugin.Sym, want []plugin.Sym) error {
|
||||
if len(got) != len(want) {
|
||||
return fmt.Errorf("unexpected number of symbols %d (want %d)\n", len(got), len(want))
|
||||
}
|
||||
|
||||
for i, g := range got {
|
||||
w := want[i]
|
||||
if len(g.Name) != len(w.Name) {
|
||||
return fmt.Errorf("names, got %d, want %d", len(g.Name), len(w.Name))
|
||||
}
|
||||
for n := range g.Name {
|
||||
if g.Name[n] != w.Name[n] {
|
||||
return fmt.Errorf("name %d, got %q, want %q", n, g.Name[n], w.Name[n])
|
||||
}
|
||||
}
|
||||
if g.File != w.File {
|
||||
return fmt.Errorf("filename, got %q, want %q", g.File, w.File)
|
||||
}
|
||||
if g.Start != w.Start {
|
||||
return fmt.Errorf("start address, got %#x, want %#x", g.Start, w.Start)
|
||||
}
|
||||
if g.End != w.End {
|
||||
return fmt.Errorf("end address, got %#x, want %#x", g.End, w.End)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestFunctionAssembly tests the FunctionAssembly routine by using a
|
||||
// fake objdump script.
|
||||
func TestFunctionAssembly(t *testing.T) {
|
||||
type testcase struct {
|
||||
s plugin.Sym
|
||||
asm string
|
||||
want []plugin.Inst
|
||||
}
|
||||
testcases := []testcase{
|
||||
{
|
||||
plugin.Sym{[]string{"symbol1"}, "", 0x1000, 0x1FFF},
|
||||
` 1000: instruction one
|
||||
1001: instruction two
|
||||
1002: instruction three
|
||||
1003: instruction four
|
||||
`,
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x1000, Text: "instruction one"},
|
||||
{Addr: 0x1001, Text: "instruction two"},
|
||||
{Addr: 0x1002, Text: "instruction three"},
|
||||
{Addr: 0x1003, Text: "instruction four"},
|
||||
},
|
||||
},
|
||||
{
|
||||
plugin.Sym{[]string{"symbol2"}, "", 0x2000, 0x2FFF},
|
||||
` 2000: instruction one
|
||||
2001: instruction two
|
||||
`,
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x2000, Text: "instruction one"},
|
||||
{Addr: 0x2001, Text: "instruction two"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
const objdump = "testdata/wrapper/objdump"
|
||||
|
||||
for _, tc := range testcases {
|
||||
insts, err := disassemble([]byte(tc.asm))
|
||||
if err != nil {
|
||||
t.Fatalf("FunctionAssembly: %v", err)
|
||||
}
|
||||
|
||||
if len(insts) != len(tc.want) {
|
||||
t.Errorf("Unexpected number of assembly instructions %d (want %d)\n", len(insts), len(tc.want))
|
||||
}
|
||||
for i := range insts {
|
||||
if insts[i] != tc.want[i] {
|
||||
t.Errorf("Expected symbol %v, got %v\n", tc.want[i], insts[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
271 src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go (generated, vendored, new file)
@@ -0,0 +1,271 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/binutils"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
type source struct {
|
||||
Sources []string
|
||||
ExecName string
|
||||
BuildID string
|
||||
Base []string
|
||||
|
||||
Seconds int
|
||||
Timeout int
|
||||
Symbolize string
|
||||
}
|
||||
|
||||
// Parse parses the command lines through the specified flags package
|
||||
// and returns the source of the profile and optionally the command
|
||||
// for the kind of report to generate (nil for interactive use).
|
||||
func parseFlags(o *plugin.Options) (*source, []string, error) {
|
||||
flag := o.Flagset
|
||||
// Comparisons.
|
||||
flagBase := flag.StringList("base", "", "Source for base profile for comparison")
|
||||
// Internal options.
|
||||
flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization")
|
||||
flagBuildID := flag.String("buildid", "", "Override build id for first mapping")
|
||||
// CPU profile options
|
||||
flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles")
|
||||
// Heap profile options
|
||||
flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size")
|
||||
flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts")
|
||||
flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size")
|
||||
flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts")
|
||||
// Contention profile options
|
||||
flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region")
|
||||
flagContentions := flag.Bool("contentions", false, "Display number of delays at each region")
|
||||
flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region")
|
||||
flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames")
|
||||
|
||||
flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile")
|
||||
|
||||
// Flags used during command processing
|
||||
installedFlags := installFlags(flag)
|
||||
|
||||
flagCommands := make(map[string]*bool)
|
||||
flagParamCommands := make(map[string]*string)
|
||||
for name, cmd := range pprofCommands {
|
||||
if cmd.hasParam {
|
||||
flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
|
||||
} else {
|
||||
flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
|
||||
}
|
||||
}
|
||||
|
||||
args := flag.Parse(func() {
|
||||
o.UI.Print(usageMsgHdr +
|
||||
usage(true) +
|
||||
usageMsgSrc +
|
||||
flag.ExtraUsage() +
|
||||
usageMsgVars)
|
||||
})
|
||||
if len(args) == 0 {
|
||||
return nil, nil, fmt.Errorf("no profile source specified")
|
||||
}
|
||||
|
||||
var execName string
|
||||
// Recognize first argument as an executable or buildid override.
|
||||
if len(args) > 1 {
|
||||
arg0 := args[0]
|
||||
if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0); err == nil {
|
||||
file.Close()
|
||||
execName = arg0
|
||||
args = args[1:]
|
||||
} else if *flagBuildID == "" && isBuildID(arg0) {
|
||||
*flagBuildID = arg0
|
||||
args = args[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Report conflicting options
|
||||
if err := updateFlags(installedFlags); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cmd, err := outputFormat(flagCommands, flagParamCommands)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
si := pprofVariables["sample_index"].value
|
||||
si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI)
|
||||
si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI)
|
||||
si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI)
|
||||
si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI)
|
||||
si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI)
|
||||
si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI)
|
||||
si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI)
|
||||
pprofVariables.set("sample_index", si)
|
||||
|
||||
if *flagMeanDelay {
|
||||
pprofVariables.set("mean", "true")
|
||||
}
|
||||
|
||||
source := &source{
|
||||
Sources: args,
|
||||
ExecName: execName,
|
||||
BuildID: *flagBuildID,
|
||||
Seconds: *flagSeconds,
|
||||
Timeout: *flagTimeout,
|
||||
Symbolize: *flagSymbolize,
|
||||
}
|
||||
|
||||
for _, s := range *flagBase {
|
||||
if *s != "" {
|
||||
source.Base = append(source.Base, *s)
|
||||
}
|
||||
}
|
||||
|
||||
if bu, ok := o.Obj.(*binutils.Binutils); ok {
|
||||
bu.SetTools(*flagTools)
|
||||
}
|
||||
return source, cmd, nil
|
||||
}
|
||||
|
||||
// installFlags creates command line flags for pprof variables.
|
||||
func installFlags(flag plugin.FlagSet) flagsInstalled {
|
||||
f := flagsInstalled{
|
||||
ints: make(map[string]*int),
|
||||
bools: make(map[string]*bool),
|
||||
floats: make(map[string]*float64),
|
||||
strings: make(map[string]*string),
|
||||
}
|
||||
for n, v := range pprofVariables {
|
||||
switch v.kind {
|
||||
case boolKind:
|
||||
if v.group != "" {
|
||||
// Set all radio variables to false to identify conflicts.
|
||||
f.bools[n] = flag.Bool(n, false, v.help)
|
||||
} else {
|
||||
f.bools[n] = flag.Bool(n, v.boolValue(), v.help)
|
||||
}
|
||||
case intKind:
|
||||
f.ints[n] = flag.Int(n, v.intValue(), v.help)
|
||||
case floatKind:
|
||||
f.floats[n] = flag.Float64(n, v.floatValue(), v.help)
|
||||
case stringKind:
|
||||
f.strings[n] = flag.String(n, v.value, v.help)
|
||||
}
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// updateFlags updates the pprof variables according to the flags
|
||||
// parsed in the command line.
|
||||
func updateFlags(f flagsInstalled) error {
|
||||
vars := pprofVariables
|
||||
groups := map[string]string{}
|
||||
for n, v := range f.bools {
|
||||
vars.set(n, fmt.Sprint(*v))
|
||||
if *v {
|
||||
g := vars[n].group
|
||||
if g != "" && groups[g] != "" {
|
||||
return fmt.Errorf("conflicting options %q and %q set", n, groups[g])
|
||||
}
|
||||
groups[g] = n
|
||||
}
|
||||
}
|
||||
for n, v := range f.ints {
|
||||
vars.set(n, fmt.Sprint(*v))
|
||||
}
|
||||
for n, v := range f.floats {
|
||||
vars.set(n, fmt.Sprint(*v))
|
||||
}
|
||||
for n, v := range f.strings {
|
||||
vars.set(n, *v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type flagsInstalled struct {
|
||||
ints map[string]*int
|
||||
bools map[string]*bool
|
||||
floats map[string]*float64
|
||||
strings map[string]*string
|
||||
}
|
||||
|
||||
// isBuildID determines if the profile may contain a build ID, by
|
||||
// checking that it is a string of hex digits.
|
||||
func isBuildID(id string) bool {
|
||||
return strings.Trim(id, "0123456789abcdefABCDEF") == ""
|
||||
}
|
||||
|
||||
func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string {
|
||||
if *flag {
|
||||
if si == "" {
|
||||
return sampleType
|
||||
}
|
||||
ui.PrintErr("Multiple value selections, ignoring ", option)
|
||||
}
|
||||
return si
|
||||
}
|
||||
|
||||
func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) {
|
||||
for n, b := range bcmd {
|
||||
if *b {
|
||||
if cmd != nil {
|
||||
return nil, fmt.Errorf("must set at most one output format")
|
||||
}
|
||||
cmd = []string{n}
|
||||
}
|
||||
}
|
||||
for n, s := range acmd {
|
||||
if *s != "" {
|
||||
if cmd != nil {
|
||||
return nil, fmt.Errorf("must set at most one output format")
|
||||
}
|
||||
cmd = []string{n, *s}
|
||||
}
|
||||
}
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
var usageMsgHdr = "usage: pprof [options] [-base source] [binary] <source> ...\n"
|
||||
|
||||
var usageMsgSrc = "\n\n" +
|
||||
" Source options:\n" +
|
||||
" -seconds Duration for time-based profile collection\n" +
|
||||
" -timeout Timeout in seconds for profile collection\n" +
|
||||
" -buildid Override build id for main binary\n" +
|
||||
" -base source Source of profile to use as baseline\n" +
|
||||
" profile.pb.gz Profile in compressed protobuf format\n" +
|
||||
" legacy_profile Profile in legacy pprof format\n" +
|
||||
" http://host/profile URL for profile handler to retrieve\n" +
|
||||
" -symbolize= Controls source of symbol information\n" +
|
||||
" none Do not attempt symbolization\n" +
|
||||
" local Examine only local binaries\n" +
|
||||
" fastlocal Only get function names from local binaries\n" +
|
||||
" remote Do not examine local binaries\n" +
|
||||
" force Force re-symbolization\n" +
|
||||
" Binary Local path or build id of binary for symbolization\n"
|
||||
|
||||
var usageMsgVars = "\n\n" +
|
||||
" Misc options:\n" +
|
||||
" -tools Search path for object tools\n" +
|
||||
"\n" +
|
||||
" Environment Variables:\n" +
|
||||
" PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" +
|
||||
" PPROF_TOOLS Search path for object-level tools\n" +
|
||||
" PPROF_BINARY_PATH Search path for local binary files\n" +
|
||||
" default: $HOME/pprof/binaries\n" +
|
||||
" finds binaries by $name and $buildid/$name\n"
561 src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go generated vendored Normal file
|
@ -0,0 +1,561 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/report"
|
||||
"github.com/google/pprof/third_party/svg"
|
||||
)
|
||||
|
||||
// commands describes the commands accepted by pprof.
|
||||
type commands map[string]*command
|
||||
|
||||
// command describes the actions for a pprof command. Includes a
|
||||
// function for command-line completion, the report format to use
|
||||
// during report generation, any postprocessing functions, and whether
|
||||
// the command expects a regexp parameter (typically a function name).
|
||||
type command struct {
|
||||
format int // report format to generate
|
||||
postProcess PostProcessor // postprocessing to run on report
|
||||
visualizer PostProcessor // display output using some callback
|
||||
hasParam bool // collect a parameter from the CLI
|
||||
description string // single-line description text saying what the command does
|
||||
usage string // multi-line help text saying how the command is used
|
||||
}
|
||||
|
||||
// help returns a help string for a command.
|
||||
func (c *command) help(name string) string {
|
||||
message := c.description + "\n"
|
||||
if c.usage != "" {
|
||||
message += " Usage:\n"
|
||||
lines := strings.Split(c.usage, "\n")
|
||||
for _, line := range lines {
|
||||
message += fmt.Sprintf(" %s\n", line)
|
||||
}
|
||||
}
|
||||
return message + "\n"
|
||||
}
|
||||
|
||||
// AddCommand adds an additional command to the set of commands
|
||||
// accepted by pprof. This enables extensions to add new commands for
|
||||
// specialized visualization formats. If the command specified already
|
||||
// exists, it is overwritten.
|
||||
func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) {
|
||||
pprofCommands[cmd] = &command{format, post, nil, false, desc, usage}
|
||||
}
|
||||
|
||||
// SetVariableDefault sets the default value for a pprof
|
||||
// variable. This enables extensions to set their own defaults.
|
||||
func SetVariableDefault(variable, value string) {
|
||||
if v := pprofVariables[variable]; v != nil {
|
||||
v.value = value
|
||||
}
|
||||
}
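
// Illustrative sketch, not part of the vendored file: how an embedder could
// use the two hooks above. The command name, output suffix and default value
// below are made-up values for the example.
func exampleExtension() {
	AddCommand("rawdot", report.Dot, awayFromTTY("dot"),
		"Write the unprocessed DOT graph to a file",
		"rawdot [focus_regex]* [-ignore_regex]* >f")
	SetVariableDefault("nodecount", "40")
}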
|
||||
|
||||
// PostProcessor is a function that applies post-processing to the report output
|
||||
type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error
|
||||
|
||||
// interactiveMode is true if pprof is running in interactive mode, reading
|
||||
// commands from its shell.
|
||||
var interactiveMode = false
|
||||
|
||||
// pprofCommands are the report generation commands recognized by pprof.
|
||||
var pprofCommands = commands{
|
||||
// Commands that require no post-processing.
|
||||
"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
|
||||
"disasm": {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
|
||||
"dot": {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
|
||||
"list": {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
|
||||
"peek": {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
|
||||
"raw": {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
|
||||
"tags": {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
|
||||
"text": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
|
||||
"top": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
|
||||
"traces": {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
|
||||
"tree": {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},
|
||||
|
||||
// Save binary formats to a file
|
||||
"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
|
||||
"proto": {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
|
||||
"topproto": {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},
|
||||
|
||||
// Generate report in DOT format and postprocess with dot
|
||||
"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
|
||||
"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
|
||||
"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
|
||||
"ps": {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},
|
||||
|
||||
// Save SVG output into a file
|
||||
"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},
|
||||
|
||||
// Visualize postprocessed dot output
|
||||
"eog": {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
|
||||
"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
|
||||
"gv": {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
|
||||
"web": {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},
|
||||
|
||||
// Visualize callgrind output
|
||||
"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},
|
||||
|
||||
// Visualize HTML directly generated by report.
|
||||
"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
|
||||
}
|
||||
|
||||
// pprofVariables are the configuration parameters that affect the
|
||||
// reports generated by pprof.
|
||||
var pprofVariables = variables{
|
||||
// Filename for file-based output formats, stdout by default.
|
||||
"output": &variable{stringKind, "", "", helpText("Output filename for file-based outputs")},
|
||||
|
||||
// Comparisons.
|
||||
"drop_negative": &variable{boolKind, "f", "", helpText(
|
||||
"Ignore negative differences",
|
||||
"Do not show any locations with values <0.")},
|
||||
|
||||
// Comparisons.
|
||||
"positive_percentages": &variable{boolKind, "f", "", helpText(
|
||||
"Ignore negative samples when computing percentages",
|
||||
" Do not count negative samples when computing the total value",
|
||||
" of the profile, used to compute percentages. If set, and the -base",
|
||||
" option is used, percentages reported will be computed against the",
|
||||
" main profile, ignoring the base profile.")},
|
||||
|
||||
// Graph handling options.
|
||||
"call_tree": &variable{boolKind, "f", "", helpText(
|
||||
"Create a context-sensitive call tree",
|
||||
"Treat locations reached through different paths as separate.")},
|
||||
|
||||
// Display options.
|
||||
"relative_percentages": &variable{boolKind, "f", "", helpText(
|
||||
"Show percentages relative to focused subgraph",
|
||||
"If unset, percentages are relative to full graph before focusing",
|
||||
"to facilitate comparison with original graph.")},
|
||||
"unit": &variable{stringKind, "minimum", "", helpText(
|
||||
"Measurement units to display",
|
||||
"Scale the sample values to this unit.",
|
||||
" For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
|
||||
" For memory profiles, use megabytes, kilobytes, bytes, etc.",
|
||||
" auto will scale each value independently to the most natural unit.")},
|
||||
"compact_labels": &variable{boolKind, "f", "", "Show minimal headers"},
|
||||
"source_path": &variable{stringKind, "", "", "Search path for source files"},
|
||||
|
||||
// Filtering options
|
||||
"nodecount": &variable{intKind, "-1", "", helpText(
|
||||
"Max number of nodes to show",
|
||||
"Uses heuristics to limit the number of locations to be displayed.",
|
||||
"On graphs, dotted edges represent paths through nodes that have been removed.")},
|
||||
"nodefraction": &variable{floatKind, "0.005", "", "Hide nodes below <f>*total"},
|
||||
"edgefraction": &variable{floatKind, "0.001", "", "Hide edges below <f>*total"},
|
||||
"trim": &variable{boolKind, "t", "", helpText(
|
||||
"Honor nodefraction/edgefraction/nodecount defaults",
|
||||
"Set to false to get the full profile, without any trimming.")},
|
||||
"focus": &variable{stringKind, "", "", helpText(
|
||||
"Restricts to samples going through a node matching regexp",
|
||||
"Discard samples that do not include a node matching this regexp.",
|
||||
"Matching includes the function name, filename or object name.")},
|
||||
"ignore": &variable{stringKind, "", "", helpText(
|
||||
"Skips paths going through any nodes matching regexp",
|
||||
"If set, discard samples that include a node matching this regexp.",
|
||||
"Matching includes the function name, filename or object name.")},
|
||||
"prune_from": &variable{stringKind, "", "", helpText(
|
||||
"Drops any functions below the matched frame.",
|
||||
"If set, any frames matching the specified regexp and any frames",
|
||||
"below it will be dropped from each sample.")},
|
||||
"hide": &variable{stringKind, "", "", helpText(
|
||||
"Skips nodes matching regexp",
|
||||
"Discard nodes that match this location.",
|
||||
"Other nodes from samples that include this location will be shown.",
|
||||
"Matching includes the function name, filename or object name.")},
|
||||
"show": &variable{stringKind, "", "", helpText(
|
||||
"Only show nodes matching regexp",
|
||||
"If set, only show nodes that match this location.",
|
||||
"Matching includes the function name, filename or object name.")},
|
||||
"tagfocus": &variable{stringKind, "", "", helpText(
|
||||
"Restrict to samples with tags in range or matched by regexp",
|
||||
"Discard samples that do not include a node with a tag matching this regexp.")},
|
||||
"tagignore": &variable{stringKind, "", "", helpText(
|
||||
"Discard samples with tags in range or matched by regexp",
|
||||
"Discard samples that do include a node with a tag matching this regexp.")},
|
||||
"tagshow": &variable{stringKind, "", "", helpText(
|
||||
"Only consider tags matching this regexp",
|
||||
"Discard tags that do not match this regexp")},
|
||||
"taghide": &variable{stringKind, "", "", helpText(
|
||||
"Skip tags matching this regexp",
|
||||
"Discard tags that match this regexp")},
|
||||
// Heap profile options
|
||||
"divide_by": &variable{floatKind, "1", "", helpText(
|
||||
"Ratio to divide all samples before visualization",
|
||||
"Divide all samples values by a constant, eg the number of processors or jobs.")},
|
||||
"mean": &variable{boolKind, "f", "", helpText(
|
||||
"Average sample value over first value (count)",
|
||||
"For memory profiles, report average memory per allocation.",
|
||||
"For time-based profiles, report average time per event.")},
|
||||
"sample_index": &variable{stringKind, "", "", helpText(
|
||||
"Sample value to report (0-based index or name)",
|
||||
"Profiles contain multiple values per sample.",
|
||||
"Use sample_index=i to select the ith value (starting at 0).")},
|
||||
|
||||
// Data sorting criteria
|
||||
"flat": &variable{boolKind, "t", "cumulative", helpText("Sort entries based on own weight")},
|
||||
"cum": &variable{boolKind, "f", "cumulative", helpText("Sort entries based on cumulative weight")},
|
||||
|
||||
// Output granularity
|
||||
"functions": &variable{boolKind, "t", "granularity", helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Takes into account the filename/lineno where the function was defined.")},
|
||||
"functionnameonly": &variable{boolKind, "f", "granularity", helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Ignores the filename/lineno where the function was defined.")},
|
||||
"files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."},
|
||||
"lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."},
|
||||
"addresses": &variable{boolKind, "f", "granularity", helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Includes functions' addresses in the output.")},
|
||||
"noinlines": &variable{boolKind, "f", "granularity", helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Attributes inlined functions to their first out-of-line caller.")},
|
||||
"addressnoinlines": &variable{boolKind, "f", "granularity", helpText(
|
||||
"Aggregate at the function level, including functions' addresses in the output.",
|
||||
"Attributes inlined functions to their first out-of-line caller.")},
|
||||
}
|
||||
|
||||
func helpText(s ...string) string {
|
||||
return strings.Join(s, "\n") + "\n"
|
||||
}
|
||||
|
||||
// usage returns a string describing the pprof commands and variables.
|
||||
// If commandLine is set, the output reflects command-line usage.
|
||||
func usage(commandLine bool) string {
|
||||
var prefix string
|
||||
if commandLine {
|
||||
prefix = "-"
|
||||
}
|
||||
fmtHelp := func(c, d string) string {
|
||||
return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
|
||||
}
|
||||
|
||||
var commands []string
|
||||
for name, cmd := range pprofCommands {
|
||||
commands = append(commands, fmtHelp(prefix+name, cmd.description))
|
||||
}
|
||||
sort.Strings(commands)
|
||||
|
||||
var help string
|
||||
if commandLine {
|
||||
help = " Output formats (select only one):\n"
|
||||
} else {
|
||||
help = " Commands:\n"
|
||||
commands = append(commands, fmtHelp("o/options", "List options and their current values"))
|
||||
commands = append(commands, fmtHelp("quit/exit/^D", "Exit pprof"))
|
||||
}
|
||||
|
||||
help = help + strings.Join(commands, "\n") + "\n\n" +
|
||||
" Options:\n"
|
||||
|
||||
// Print help for variables after sorting them.
|
||||
// Collect radio variables by their group name to print them together.
|
||||
radioOptions := make(map[string][]string)
|
||||
var variables []string
|
||||
for name, vr := range pprofVariables {
|
||||
if vr.group != "" {
|
||||
radioOptions[vr.group] = append(radioOptions[vr.group], name)
|
||||
continue
|
||||
}
|
||||
variables = append(variables, fmtHelp(prefix+name, vr.help))
|
||||
}
|
||||
sort.Strings(variables)
|
||||
|
||||
help = help + strings.Join(variables, "\n") + "\n\n" +
|
||||
" Option groups (only set one per group):\n"
|
||||
|
||||
var radioStrings []string
|
||||
for radio, ops := range radioOptions {
|
||||
sort.Strings(ops)
|
||||
s := []string{fmtHelp(radio, "")}
|
||||
for _, op := range ops {
|
||||
s = append(s, " "+fmtHelp(prefix+op, pprofVariables[op].help))
|
||||
}
|
||||
|
||||
radioStrings = append(radioStrings, strings.Join(s, "\n"))
|
||||
}
|
||||
sort.Strings(radioStrings)
|
||||
return help + strings.Join(radioStrings, "\n")
|
||||
}
|
||||
|
||||
func reportHelp(c string, cum, redirect bool) string {
|
||||
h := []string{
|
||||
c + " [n] [focus_regex]* [-ignore_regex]*",
|
||||
"Include up to n samples",
|
||||
"Include samples matching focus_regex, and exclude ignore_regex.",
|
||||
}
|
||||
if cum {
|
||||
h[0] += " [-cum]"
|
||||
h = append(h, "-cum sorts the output by cumulative weight")
|
||||
}
|
||||
if redirect {
|
||||
h[0] += " >f"
|
||||
h = append(h, "Optionally save the report on the file f")
|
||||
}
|
||||
return strings.Join(h, "\n")
|
||||
}
|
||||
|
||||
func listHelp(c string, redirect bool) string {
|
||||
h := []string{
|
||||
c + "<func_regex|address> [-focus_regex]* [-ignore_regex]*",
|
||||
"Include functions matching func_regex, or including the address specified.",
|
||||
"Include samples matching focus_regex, and exclude ignore_regex.",
|
||||
}
|
||||
if redirect {
|
||||
h[0] += " >f"
|
||||
h = append(h, "Optionally save the report on the file f")
|
||||
}
|
||||
return strings.Join(h, "\n")
|
||||
}
|
||||
|
||||
// browsers returns a list of commands to attempt for web visualization.
|
||||
func browsers() []string {
|
||||
cmds := []string{"chrome", "google-chrome", "firefox"}
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
return append(cmds, "/usr/bin/open")
|
||||
case "windows":
|
||||
return append(cmds, "cmd /c start")
|
||||
default:
|
||||
userBrowser := os.Getenv("BROWSER")
|
||||
if userBrowser != "" {
|
||||
cmds = append([]string{userBrowser, "sensible-browser"}, cmds...)
|
||||
} else {
|
||||
cmds = append([]string{"sensible-browser"}, cmds...)
|
||||
}
|
||||
return append(cmds, "xdg-open")
|
||||
}
|
||||
}
|
||||
|
||||
var kcachegrind = []string{"kcachegrind"}
|
||||
|
||||
// awayFromTTY saves the output in a file if it would otherwise go to
|
||||
// the terminal screen. This is used to avoid dumping binary data on
|
||||
// the screen.
|
||||
func awayFromTTY(format string) PostProcessor {
|
||||
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
|
||||
if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
|
||||
tempFile, err := newTempFile("", "profile", "."+format)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ui.PrintErr("Generating report in ", tempFile.Name())
|
||||
output = tempFile
|
||||
}
|
||||
_, err := io.Copy(output, input)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func invokeDot(format string) PostProcessor {
|
||||
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
|
||||
cmd := exec.Command("dot", "-T"+format)
|
||||
cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("Failed to execute dot. Is Graphviz installed? Error: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// massageDotSVG invokes the dot tool to generate an SVG image and alters
|
||||
// the image to have panning capabilities when viewed in a browser.
|
||||
func massageDotSVG() PostProcessor {
|
||||
generateSVG := invokeDot("svg")
|
||||
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
|
||||
baseSVG := new(bytes.Buffer)
|
||||
if err := generateSVG(input, baseSVG, ui); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := output.Write([]byte(svg.Massage(baseSVG.String())))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
|
||||
return func(input io.Reader, output io.Writer, ui plugin.UI) error {
|
||||
tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
deferDeleteTempFile(tempFile.Name())
|
||||
if _, err := io.Copy(tempFile, input); err != nil {
|
||||
return err
|
||||
}
|
||||
tempFile.Close()
|
||||
// Try visualizers until one is successful
|
||||
for _, v := range visualizers {
|
||||
// Separate command and arguments for exec.Command.
|
||||
args := strings.Split(v, " ")
|
||||
if len(args) == 0 {
|
||||
continue
|
||||
}
|
||||
viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
|
||||
viewer.Stderr = os.Stderr
|
||||
if err = viewer.Start(); err == nil {
|
||||
// Wait for a second so that the visualizer has a chance to
|
||||
// open the input file. This needs to be done even if we're
|
||||
// waiting for the visualizer as it can be just a wrapper that
|
||||
// spawns a browser tab and returns right away.
|
||||
defer func(t <-chan time.Time) {
|
||||
<-t
|
||||
}(time.After(time.Second))
|
||||
// In interactive mode, let the visualizer run in the background
|
||||
// so other commands can be issued.
|
||||
if !interactiveMode {
|
||||
return viewer.Wait()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// variables describe the configuration parameters recognized by pprof.
|
||||
type variables map[string]*variable
|
||||
|
||||
// variable is a single configuration parameter.
|
||||
type variable struct {
|
||||
kind int // How to interpret the value, must be one of the enums below.
|
||||
value string // Effective value. Only values appropriate for the Kind should be set.
|
||||
group string // boolKind variables with the same Group != "" cannot be set simultaneously.
|
||||
help string // Text describing the variable, in multiple lines separated by newline.
|
||||
}
|
||||
|
||||
const (
|
||||
// variable.kind must be one of these constants.
|
||||
boolKind = iota
|
||||
intKind
|
||||
floatKind
|
||||
stringKind
|
||||
)
|
||||
|
||||
// set updates the value of a variable, checking that the value is
|
||||
// suitable for the variable Kind.
|
||||
func (vars variables) set(name, value string) error {
|
||||
v := vars[name]
|
||||
if v == nil {
|
||||
return fmt.Errorf("no variable %s", name)
|
||||
}
|
||||
var err error
|
||||
switch v.kind {
|
||||
case boolKind:
|
||||
var b bool
|
||||
if b, err = stringToBool(value); err == nil {
|
||||
if v.group != "" && b == false {
|
||||
err = fmt.Errorf("%q can only be set to true", name)
|
||||
}
|
||||
}
|
||||
case intKind:
|
||||
_, err = strconv.Atoi(value)
|
||||
case floatKind:
|
||||
_, err = strconv.ParseFloat(value, 64)
|
||||
case stringKind:
|
||||
// Remove quotes, particularly useful for empty values.
|
||||
if len(value) > 1 && strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) {
|
||||
value = value[1 : len(value)-1]
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vars[name].value = value
|
||||
if group := vars[name].group; group != "" {
|
||||
for vname, vvar := range vars {
|
||||
if vvar.group == group && vname != name {
|
||||
vvar.value = "f"
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
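
// Illustrative sketch, not part of the vendored file: setting one boolean in a
// radio group flips the other members of the same group to "f".
func exampleGranularitySet() {
	v := pprofVariables.makeCopy()
	v.set("files", "t")
	fmt.Println(v["files"].value)     // "t"
	fmt.Println(v["functions"].value) // "f", reset because it shares the group
}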
|
||||
|
||||
// boolValue returns the value of a boolean variable.
|
||||
func (v *variable) boolValue() bool {
|
||||
b, err := stringToBool(v.value)
|
||||
if err != nil {
|
||||
panic("unexpected value " + v.value + " for bool ")
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// intValue returns the value of an intKind variable.
|
||||
func (v *variable) intValue() int {
|
||||
i, err := strconv.Atoi(v.value)
|
||||
if err != nil {
|
||||
panic("unexpected value " + v.value + " for int ")
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// floatValue returns the value of a Float variable.
|
||||
func (v *variable) floatValue() float64 {
|
||||
f, err := strconv.ParseFloat(v.value, 64)
|
||||
if err != nil {
|
||||
panic("unexpected value " + v.value + " for float ")
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// stringValue returns a canonical representation for a variable.
|
||||
func (v *variable) stringValue() string {
|
||||
switch v.kind {
|
||||
case boolKind:
|
||||
return fmt.Sprint(v.boolValue())
|
||||
case intKind:
|
||||
return fmt.Sprint(v.intValue())
|
||||
case floatKind:
|
||||
return fmt.Sprint(v.floatValue())
|
||||
}
|
||||
return v.value
|
||||
}
|
||||
|
||||
func stringToBool(s string) (bool, error) {
|
||||
switch strings.ToLower(s) {
|
||||
case "true", "t", "yes", "y", "1", "":
|
||||
return true, nil
|
||||
case "false", "f", "no", "n", "0":
|
||||
return false, nil
|
||||
default:
|
||||
return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
|
||||
}
|
||||
}
|
||||
|
||||
// makeCopy returns a duplicate of a set of shell variables.
|
||||
func (vars variables) makeCopy() variables {
|
||||
varscopy := make(variables, len(vars))
|
||||
for n, v := range vars {
|
||||
vcopy := *v
|
||||
varscopy[n] = &vcopy
|
||||
}
|
||||
return varscopy
|
||||
}
287 src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go generated vendored Normal file
|
@ -0,0 +1,287 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package driver implements the core pprof functionality. It can be
|
||||
// parameterized with a flag implementation, and with fetch and symbolize
|
||||
// mechanisms.
|
||||
package driver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/report"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
// PProf acquires a profile, and symbolizes it using a profile
|
||||
// manager. Then it generates a report formatted according to the
|
||||
// options selected through the flags package.
|
||||
func PProf(eo *plugin.Options) error {
|
||||
// Remove any temporary files created during pprof processing.
|
||||
defer cleanupTempFiles()
|
||||
|
||||
o := setDefaults(eo)
|
||||
|
||||
src, cmd, err := parseFlags(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p, err := fetchProfiles(src, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cmd != nil {
|
||||
return generateReport(p, cmd, pprofVariables, o)
|
||||
}
|
||||
|
||||
return interactive(p, o)
|
||||
}
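
// Illustrative sketch, not part of the vendored file: a minimal embedder,
// assuming setDefaults fills in any plugin.Options fields left nil.
func examplePProf() error {
	return PProf(&plugin.Options{})
}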
|
||||
|
||||
func generateReport(p *profile.Profile, cmd []string, vars variables, o *plugin.Options) error {
|
||||
p = p.Copy() // Prevent modification to the incoming profile.
|
||||
|
||||
vars = applyCommandOverrides(cmd, vars)
|
||||
|
||||
// Delay focus after configuring report to get percentages on all samples.
|
||||
relative := vars["relative_percentages"].boolValue()
|
||||
if relative {
|
||||
if err := applyFocus(p, vars, o.UI); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ropt, err := reportOptions(p, vars)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c := pprofCommands[cmd[0]]
|
||||
if c == nil {
|
||||
panic("unexpected nil command")
|
||||
}
|
||||
ropt.OutputFormat = c.format
|
||||
if len(cmd) == 2 {
|
||||
s, err := regexp.Compile(cmd[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing argument regexp %s: %v", cmd[1], err)
|
||||
}
|
||||
ropt.Symbol = s
|
||||
}
|
||||
|
||||
rpt := report.New(p, ropt)
|
||||
if !relative {
|
||||
if err := applyFocus(p, vars, o.UI); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := aggregate(p, vars); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate the report.
|
||||
dst := new(bytes.Buffer)
|
||||
if err := report.Generate(dst, rpt, o.Obj); err != nil {
|
||||
return err
|
||||
}
|
||||
src := dst
|
||||
|
||||
// If necessary, perform any data post-processing.
|
||||
if c.postProcess != nil {
|
||||
dst = new(bytes.Buffer)
|
||||
if err := c.postProcess(src, dst, o.UI); err != nil {
|
||||
return err
|
||||
}
|
||||
src = dst
|
||||
}
|
||||
|
||||
// If no output is specified, use default visualizer.
|
||||
output := vars["output"].value
|
||||
if output == "" {
|
||||
if c.visualizer != nil {
|
||||
return c.visualizer(src, os.Stdout, o.UI)
|
||||
}
|
||||
_, err := src.WriteTo(os.Stdout)
|
||||
return err
|
||||
}
|
||||
|
||||
// Output to specified file.
|
||||
o.UI.PrintErr("Generating report in ", output)
|
||||
out, err := os.Create(output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := src.WriteTo(out); err != nil {
|
||||
out.Close()
|
||||
return err
|
||||
}
|
||||
return out.Close()
|
||||
}
|
||||
|
||||
func applyCommandOverrides(cmd []string, v variables) variables {
|
||||
trim, focus, tagfocus, hide := v["trim"].boolValue(), true, true, true
|
||||
|
||||
switch cmd[0] {
|
||||
case "proto", "raw":
|
||||
trim, focus, tagfocus, hide = false, false, false, false
|
||||
v.set("addresses", "t")
|
||||
case "callgrind", "kcachegrind":
|
||||
trim = false
|
||||
v.set("addresses", "t")
|
||||
case "disasm", "weblist":
|
||||
trim = false
|
||||
v.set("addressnoinlines", "t")
|
||||
case "peek":
|
||||
trim, focus, hide = false, false, false
|
||||
case "list":
|
||||
v.set("nodecount", "0")
|
||||
v.set("lines", "t")
|
||||
case "text", "top", "topproto":
|
||||
if v["nodecount"].intValue() == -1 {
|
||||
v.set("nodecount", "0")
|
||||
}
|
||||
default:
|
||||
if v["nodecount"].intValue() == -1 {
|
||||
v.set("nodecount", "80")
|
||||
}
|
||||
}
|
||||
if trim == false {
|
||||
v.set("nodecount", "0")
|
||||
v.set("nodefraction", "0")
|
||||
v.set("edgefraction", "0")
|
||||
}
|
||||
if focus == false {
|
||||
v.set("focus", "")
|
||||
v.set("ignore", "")
|
||||
}
|
||||
if tagfocus == false {
|
||||
v.set("tagfocus", "")
|
||||
v.set("tagignore", "")
|
||||
}
|
||||
if hide == false {
|
||||
v.set("hide", "")
|
||||
v.set("show", "")
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func aggregate(prof *profile.Profile, v variables) error {
|
||||
var inlines, function, filename, linenumber, address bool
|
||||
switch {
|
||||
case v["addresses"].boolValue():
|
||||
return nil
|
||||
case v["lines"].boolValue():
|
||||
inlines = true
|
||||
function = true
|
||||
filename = true
|
||||
linenumber = true
|
||||
case v["files"].boolValue():
|
||||
inlines = true
|
||||
filename = true
|
||||
case v["functions"].boolValue():
|
||||
inlines = true
|
||||
function = true
|
||||
filename = true
|
||||
case v["noinlines"].boolValue():
|
||||
function = true
|
||||
filename = true
|
||||
case v["addressnoinlines"].boolValue():
|
||||
function = true
|
||||
filename = true
|
||||
linenumber = true
|
||||
address = true
|
||||
case v["functionnameonly"].boolValue():
|
||||
inlines = true
|
||||
function = true
|
||||
default:
|
||||
return fmt.Errorf("unexpected granularity")
|
||||
}
|
||||
return prof.Aggregate(inlines, function, filename, linenumber, address)
|
||||
}
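
// Illustrative sketch, not part of the vendored file: the "lines" granularity
// above corresponds to this call, keeping inlined frames and keying locations
// by function, file and line number but not by address.
func exampleAggregateLines(prof *profile.Profile) error {
	return prof.Aggregate(true, true, true, true, false)
}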
|
||||
|
||||
func reportOptions(p *profile.Profile, vars variables) (*report.Options, error) {
|
||||
si, mean := vars["sample_index"].value, vars["mean"].boolValue()
|
||||
value, meanDiv, sample, err := sampleFormat(p, si, mean)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stype := sample.Type
|
||||
if mean {
|
||||
stype = "mean_" + stype
|
||||
}
|
||||
|
||||
if vars["divide_by"].floatValue() == 0 {
|
||||
return nil, fmt.Errorf("zero divisor specified")
|
||||
}
|
||||
|
||||
ropt := &report.Options{
|
||||
CumSort: vars["cum"].boolValue(),
|
||||
CallTree: vars["call_tree"].boolValue(),
|
||||
DropNegative: vars["drop_negative"].boolValue(),
|
||||
PositivePercentages: vars["positive_percentages"].boolValue(),
|
||||
|
||||
CompactLabels: vars["compact_labels"].boolValue(),
|
||||
Ratio: 1 / vars["divide_by"].floatValue(),
|
||||
|
||||
NodeCount: vars["nodecount"].intValue(),
|
||||
NodeFraction: vars["nodefraction"].floatValue(),
|
||||
EdgeFraction: vars["edgefraction"].floatValue(),
|
||||
|
||||
SampleValue: value,
|
||||
SampleMeanDivisor: meanDiv,
|
||||
SampleType: stype,
|
||||
SampleUnit: sample.Unit,
|
||||
|
||||
OutputUnit: vars["unit"].value,
|
||||
|
||||
SourcePath: vars["source_path"].stringValue(),
|
||||
}
|
||||
|
||||
if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
|
||||
ropt.Title = filepath.Base(p.Mapping[0].File)
|
||||
}
|
||||
|
||||
return ropt, nil
|
||||
}
|
||||
|
||||
type sampleValueFunc func([]int64) int64
|
||||
|
||||
// sampleFormat returns a function to extract values out of a profile.Sample,
|
||||
// and the type/units of those values.
|
||||
func sampleFormat(p *profile.Profile, sampleIndex string, mean bool) (value, meanDiv sampleValueFunc, v *profile.ValueType, err error) {
|
||||
if len(p.SampleType) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("profile has no samples")
|
||||
}
|
||||
index, err := p.SampleIndexByName(sampleIndex)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
value = valueExtractor(index)
|
||||
if mean {
|
||||
meanDiv = valueExtractor(0)
|
||||
}
|
||||
v = p.SampleType[index]
|
||||
return
|
||||
}
|
||||
|
||||
func valueExtractor(ix int) sampleValueFunc {
|
||||
return func(v []int64) int64 {
|
||||
return v[ix]
|
||||
}
|
||||
}
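
// Illustrative sketch, not part of the vendored file: how the value and
// meanDiv extractors combine when mean is set. The report divides the selected
// value by the first value (the count), giving e.g. bytes per allocation.
func exampleMeanValue(sample []int64, index int) int64 {
	value := valueExtractor(index)
	count := valueExtractor(0)
	if c := count(sample); c != 0 {
		return value(sample) / c
	}
	return 0
}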
174 src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go generated vendored Normal file
|
@ -0,0 +1,174 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/measurement"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
var tagFilterRangeRx = regexp.MustCompile("([[:digit:]]+)([[:alpha:]]+)")
|
||||
|
||||
// applyFocus filters samples based on the focus/ignore options
|
||||
func applyFocus(prof *profile.Profile, v variables, ui plugin.UI) error {
|
||||
focus, err := compileRegexOption("focus", v["focus"].value, nil)
|
||||
ignore, err := compileRegexOption("ignore", v["ignore"].value, err)
|
||||
hide, err := compileRegexOption("hide", v["hide"].value, err)
|
||||
show, err := compileRegexOption("show", v["show"].value, err)
|
||||
tagfocus, err := compileTagFilter("tagfocus", v["tagfocus"].value, ui, err)
|
||||
tagignore, err := compileTagFilter("tagignore", v["tagignore"].value, ui, err)
|
||||
prunefrom, err := compileRegexOption("prune_from", v["prune_from"].value, err)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fm, im, hm, hnm := prof.FilterSamplesByName(focus, ignore, hide, show)
|
||||
warnNoMatches(focus == nil || fm, "Focus", ui)
|
||||
warnNoMatches(ignore == nil || im, "Ignore", ui)
|
||||
warnNoMatches(hide == nil || hm, "Hide", ui)
|
||||
warnNoMatches(show == nil || hnm, "Show", ui)
|
||||
|
||||
tfm, tim := prof.FilterSamplesByTag(tagfocus, tagignore)
|
||||
warnNoMatches(tagfocus == nil || tfm, "TagFocus", ui)
|
||||
warnNoMatches(tagignore == nil || tim, "TagIgnore", ui)
|
||||
|
||||
tagshow, err := compileRegexOption("tagshow", v["tagshow"].value, err)
|
||||
taghide, err := compileRegexOption("taghide", v["taghide"].value, err)
|
||||
tns, tnh := prof.FilterTagsByName(tagshow, taghide)
|
||||
warnNoMatches(tagshow == nil || tns, "TagShow", ui)
|
||||
warnNoMatches(tagignore == nil || tnh, "TagHide", ui)
|
||||
|
||||
if prunefrom != nil {
|
||||
prof.PruneFrom(prunefrom)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func compileRegexOption(name, value string, err error) (*regexp.Regexp, error) {
|
||||
if value == "" || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rx, err := regexp.Compile(value)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing %s regexp: %v", name, err)
|
||||
}
|
||||
return rx, nil
|
||||
}
|
||||
|
||||
func compileTagFilter(name, value string, ui plugin.UI, err error) (func(*profile.Sample) bool, error) {
|
||||
if value == "" || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if numFilter := parseTagFilterRange(value); numFilter != nil {
|
||||
ui.PrintErr(name, ":Interpreted '", value, "' as range, not regexp")
|
||||
return func(s *profile.Sample) bool {
|
||||
for key, vals := range s.NumLabel {
|
||||
for _, val := range vals {
|
||||
if numFilter(val, key) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, nil
|
||||
}
|
||||
var rfx []*regexp.Regexp
|
||||
for _, tagf := range strings.Split(value, ",") {
|
||||
fx, err := regexp.Compile(tagf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing %s regexp: %v", name, err)
|
||||
}
|
||||
rfx = append(rfx, fx)
|
||||
}
|
||||
return func(s *profile.Sample) bool {
|
||||
matchedrx:
|
||||
for _, rx := range rfx {
|
||||
for key, vals := range s.Label {
|
||||
for _, val := range vals {
|
||||
if rx.MatchString(key + ":" + val) {
|
||||
continue matchedrx
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, nil
|
||||
}
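
// Illustrative sketch, not part of the vendored file: tag regexps are matched
// against "key:value" strings, and every comma-separated regexp must match
// some label for the sample to pass the filter.
func exampleTagFilter(ui plugin.UI) bool {
	f, err := compileTagFilter("tagfocus", "thread:GC", ui, nil)
	if err != nil || f == nil {
		return false
	}
	s := &profile.Sample{Label: map[string][]string{"thread": {"GC worker"}}}
	return f(s) // true: "thread:GC worker" matches the regexp "thread:GC"
}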
|
||||
|
||||
// parseTagFilterRange returns a function that checks whether a value is
|
||||
// contained in the range described by a string. It can recognize
|
||||
// strings of the form:
|
||||
// "32kb" -- matches values == 32kb
|
||||
// ":64kb" -- matches values <= 64kb
|
||||
// "4mb:" -- matches values >= 4mb
|
||||
// "12kb:64mb" -- matches values between 12kb and 64mb (both included).
|
||||
func parseTagFilterRange(filter string) func(int64, string) bool {
|
||||
ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2)
|
||||
if len(ranges) == 0 {
|
||||
return nil // No ranges were identified
|
||||
}
|
||||
v, err := strconv.ParseInt(ranges[0][1], 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[0][1], err))
|
||||
}
|
||||
scaledValue, unit := measurement.Scale(v, ranges[0][2], ranges[0][2])
|
||||
if len(ranges) == 1 {
|
||||
switch match := ranges[0][0]; filter {
|
||||
case match:
|
||||
return func(v int64, u string) bool {
|
||||
sv, su := measurement.Scale(v, u, unit)
|
||||
return su == unit && sv == scaledValue
|
||||
}
|
||||
case match + ":":
|
||||
return func(v int64, u string) bool {
|
||||
sv, su := measurement.Scale(v, u, unit)
|
||||
return su == unit && sv >= scaledValue
|
||||
}
|
||||
case ":" + match:
|
||||
return func(v int64, u string) bool {
|
||||
sv, su := measurement.Scale(v, u, unit)
|
||||
return su == unit && sv <= scaledValue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if filter != ranges[0][0]+":"+ranges[1][0] {
|
||||
return nil
|
||||
}
|
||||
if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil {
|
||||
panic(fmt.Errorf("Failed to parse int %s: %v", ranges[1][1], err))
|
||||
}
|
||||
scaledValue2, unit2 := measurement.Scale(v, ranges[1][2], unit)
|
||||
if unit != unit2 {
|
||||
return nil
|
||||
}
|
||||
return func(v int64, u string) bool {
|
||||
sv, su := measurement.Scale(v, u, unit)
|
||||
return su == unit && sv >= scaledValue && sv <= scaledValue2
|
||||
}
|
||||
}
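
// Illustrative sketch, not part of the vendored file: the range syntax above
// in action. Values are scaled to a common unit before comparison, so byte
// values can be checked against a kb/mb range.
func exampleTagRange() {
	match := parseTagFilterRange("512kb:1mb")
	if match == nil {
		return
	}
	fmt.Println(match(600*1024, "bytes"))    // true: 600kb lies inside [512kb, 1mb]
	fmt.Println(match(2*1024*1024, "bytes")) // false: 2mb is above the range
}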
|
||||
|
||||
func warnNoMatches(match bool, option string, ui plugin.UI) {
|
||||
if !match {
|
||||
ui.PrintErr(option + " expression matched no samples")
|
||||
}
|
||||
}
1095 src/cmd/vendor/github.com/google/pprof/internal/driver/driver_test.go generated vendored Normal file
File diff suppressed because it is too large
525 src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go generated vendored Normal file
|
@ -0,0 +1,525 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/pprof/internal/measurement"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
// fetchProfiles fetches and symbolizes the profiles specified by s.
|
||||
// It will merge all the profiles it is able to retrieve, even if
|
||||
// there are some failures. It will return an error if it is unable to
|
||||
// fetch any profiles.
|
||||
func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) {
|
||||
sources := make([]profileSource, 0, len(s.Sources)+len(s.Base))
|
||||
for _, src := range s.Sources {
|
||||
sources = append(sources, profileSource{
|
||||
addr: src,
|
||||
source: s,
|
||||
scale: 1,
|
||||
})
|
||||
}
|
||||
for _, src := range s.Base {
|
||||
sources = append(sources, profileSource{
|
||||
addr: src,
|
||||
source: s,
|
||||
scale: -1,
|
||||
})
|
||||
}
|
||||
p, msrcs, save, cnt, err := chunkedGrab(sources, o.Fetch, o.Obj, o.UI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cnt == 0 {
|
||||
return nil, fmt.Errorf("failed to fetch any profiles")
|
||||
}
|
||||
if want, got := len(sources), cnt; want != got {
|
||||
o.UI.PrintErr(fmt.Sprintf("fetched %d profiles out of %d", got, want))
|
||||
}
|
||||
|
||||
// Symbolize the merged profile.
|
||||
if err := o.Sym.Symbolize(s.Symbolize, msrcs, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.RemoveUninteresting()
|
||||
unsourceMappings(p)
|
||||
|
||||
// Save a copy of the merged profile if there is at least one remote source.
|
||||
if save {
|
||||
dir, err := setTmpDir(o.UI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
prefix := "pprof."
|
||||
if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
|
||||
prefix += filepath.Base(p.Mapping[0].File) + "."
|
||||
}
|
||||
for _, s := range p.SampleType {
|
||||
prefix += s.Type + "."
|
||||
}
|
||||
|
||||
tempFile, err := newTempFile(dir, prefix, ".pb.gz")
|
||||
if err == nil {
|
||||
if err = p.Write(tempFile); err == nil {
|
||||
o.UI.PrintErr("Saved profile in ", tempFile.Name())
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
o.UI.PrintErr("Could not save profile: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := p.CheckValid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// chunkedGrab fetches the profiles described in source and merges them into
|
||||
// a single profile. It fetches a chunk of profiles concurrently, with a maximum
|
||||
// chunk size to limit its memory usage.
|
||||
func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (*profile.Profile, plugin.MappingSources, bool, int, error) {
|
||||
const chunkSize = 64
|
||||
|
||||
var p *profile.Profile
|
||||
var msrc plugin.MappingSources
|
||||
var save bool
|
||||
var count int
|
||||
|
||||
for start := 0; start < len(sources); start += chunkSize {
|
||||
end := start + chunkSize
|
||||
if end > len(sources) {
|
||||
end = len(sources)
|
||||
}
|
||||
chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui)
|
||||
switch {
|
||||
case chunkErr != nil:
|
||||
return nil, nil, false, 0, chunkErr
|
||||
case chunkP == nil:
|
||||
continue
|
||||
case p == nil:
|
||||
p, msrc, save, count = chunkP, chunkMsrc, chunkSave, chunkCount
|
||||
default:
|
||||
p, msrc, chunkErr = combineProfiles([]*profile.Profile{p, chunkP}, []plugin.MappingSources{msrc, chunkMsrc})
|
||||
if chunkErr != nil {
|
||||
return nil, nil, false, 0, chunkErr
|
||||
}
|
||||
if chunkSave {
|
||||
save = true
|
||||
}
|
||||
count += chunkCount
|
||||
}
|
||||
}
|
||||
return p, msrc, save, count, nil
|
||||
}
|
||||
|
||||
// concurrentGrab fetches multiple profiles concurrently
|
||||
func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (*profile.Profile, plugin.MappingSources, bool, int, error) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(sources))
|
||||
for i := range sources {
|
||||
go func(s *profileSource) {
|
||||
defer wg.Done()
|
||||
s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, s.scale, fetch, obj, ui)
|
||||
}(&sources[i])
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
var save bool
|
||||
profiles := make([]*profile.Profile, 0, len(sources))
|
||||
msrcs := make([]plugin.MappingSources, 0, len(sources))
|
||||
for i := range sources {
|
||||
s := &sources[i]
|
||||
if err := s.err; err != nil {
|
||||
ui.PrintErr(s.addr + ": " + err.Error())
|
||||
continue
|
||||
}
|
||||
save = save || s.remote
|
||||
profiles = append(profiles, s.p)
|
||||
msrcs = append(msrcs, s.msrc)
|
||||
*s = profileSource{}
|
||||
}
|
||||
|
||||
if len(profiles) == 0 {
|
||||
return nil, nil, false, 0, nil
|
||||
}
|
||||
|
||||
p, msrc, err := combineProfiles(profiles, msrcs)
|
||||
if err != nil {
|
||||
return nil, nil, false, 0, err
|
||||
}
|
||||
return p, msrc, save, len(profiles), nil
|
||||
}
|
||||
|
||||
func combineProfiles(profiles []*profile.Profile, msrcs []plugin.MappingSources) (*profile.Profile, plugin.MappingSources, error) {
|
||||
// Merge profiles.
|
||||
if err := measurement.ScaleProfiles(profiles); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
p, err := profile.Merge(profiles)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Combine mapping sources.
|
||||
msrc := make(plugin.MappingSources)
|
||||
for _, ms := range msrcs {
|
||||
for m, s := range ms {
|
||||
msrc[m] = append(msrc[m], s...)
|
||||
}
|
||||
}
|
||||
return p, msrc, nil
|
||||
}
|
||||
|
||||
type profileSource struct {
|
||||
addr string
|
||||
source *source
|
||||
scale float64
|
||||
|
||||
p *profile.Profile
|
||||
msrc plugin.MappingSources
|
||||
remote bool
|
||||
err error
|
||||
}
|
||||
|
||||
// setTmpDir prepares the directory to use to save profiles retrieved
|
||||
// remotely. It is selected from PPROF_TMPDIR, defaulting to $HOME/pprof.
|
||||
func setTmpDir(ui plugin.UI) (string, error) {
|
||||
if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir != "" {
|
||||
return profileDir, nil
|
||||
}
|
||||
for _, tmpDir := range []string{os.Getenv("HOME") + "/pprof", os.TempDir()} {
|
||||
if err := os.MkdirAll(tmpDir, 0755); err != nil {
|
||||
ui.PrintErr("Could not use temp dir ", tmpDir, ": ", err.Error())
|
||||
continue
|
||||
}
|
||||
return tmpDir, nil
|
||||
}
|
||||
return "", fmt.Errorf("failed to identify temp dir")
|
||||
}
|
||||
|
||||
// grabProfile fetches a profile. Returns the profile, sources for the
|
||||
// profile mappings, a bool indicating if the profile was fetched
|
||||
// remotely, and an error.
|
||||
func grabProfile(s *source, source string, scale float64, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) {
|
||||
var src string
|
||||
duration, timeout := time.Duration(s.Seconds)*time.Second, time.Duration(s.Timeout)*time.Second
|
||||
if fetcher != nil {
|
||||
p, src, err = fetcher.Fetch(source, duration, timeout)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if err != nil || p == nil {
|
||||
// Fetch the profile over HTTP or from a file.
|
||||
p, src, err = fetch(source, duration, timeout, ui)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = p.CheckValid(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Apply local changes to the profile.
|
||||
p.Scale(scale)
|
||||
|
||||
// Update the binary locations from command line and paths.
|
||||
locateBinaries(p, s, obj, ui)
|
||||
|
||||
// Collect the source URL for all mappings.
|
||||
if src != "" {
|
||||
msrc = collectMappingSources(p, src)
|
||||
remote = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// collectMappingSources saves the mapping sources of a profile.
|
||||
func collectMappingSources(p *profile.Profile, source string) plugin.MappingSources {
|
||||
ms := plugin.MappingSources{}
|
||||
for _, m := range p.Mapping {
|
||||
src := struct {
|
||||
Source string
|
||||
Start uint64
|
||||
}{
|
||||
source, m.Start,
|
||||
}
|
||||
key := m.BuildID
|
||||
if key == "" {
|
||||
key = m.File
|
||||
}
|
||||
if key == "" {
|
||||
// If there is no build id or source file, use the source as the
|
||||
// mapping file. This will enable remote symbolization for this
|
||||
// mapping, in particular for Go profiles on the legacy format.
|
||||
// The source is reset back to empty string by unsourceMapping
|
||||
// which is called after symbolization is finished.
|
||||
m.File = source
|
||||
key = source
|
||||
}
|
||||
ms[key] = append(ms[key], src)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
|
||||
// unsourceMappings iterates over the mappings in a profile and resets any
|
||||
// file set to the remote source URL by collectMappingSources back to the empty string.
|
||||
func unsourceMappings(p *profile.Profile) {
|
||||
for _, m := range p.Mapping {
|
||||
if m.BuildID == "" {
|
||||
if u, err := url.Parse(m.File); err == nil && u.IsAbs() {
|
||||
m.File = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// locateBinaries searches for binary files listed in the profile and, if found,
|
||||
// updates the profile accordingly.
|
||||
func locateBinaries(p *profile.Profile, s *source, obj plugin.ObjTool, ui plugin.UI) {
|
||||
// Construct search path to examine
|
||||
searchPath := os.Getenv("PPROF_BINARY_PATH")
|
||||
if searchPath == "" {
|
||||
// Use $HOME/pprof/binaries as default directory for local symbolization binaries
|
||||
searchPath = filepath.Join(os.Getenv("HOME"), "pprof", "binaries")
|
||||
}
|
||||
mapping:
|
||||
for _, m := range p.Mapping {
|
||||
var baseName string
|
||||
if m.File != "" {
|
||||
baseName = filepath.Base(m.File)
|
||||
}
|
||||
|
||||
for _, path := range filepath.SplitList(searchPath) {
|
||||
var fileNames []string
|
||||
if m.BuildID != "" {
|
||||
fileNames = []string{filepath.Join(path, m.BuildID, baseName)}
|
||||
if matches, err := filepath.Glob(filepath.Join(path, m.BuildID, "*")); err == nil {
|
||||
fileNames = append(fileNames, matches...)
|
||||
}
|
||||
}
|
||||
if baseName != "" {
|
||||
fileNames = append(fileNames, filepath.Join(path, baseName))
|
||||
}
|
||||
for _, name := range fileNames {
|
||||
if f, err := obj.Open(name, m.Start, m.Limit, m.Offset); err == nil {
|
||||
defer f.Close()
|
||||
fileBuildID := f.BuildID()
|
||||
if m.BuildID != "" && m.BuildID != fileBuildID {
|
||||
ui.PrintErr("Ignoring local file " + name + ": build-id mismatch (" + m.BuildID + " != " + fileBuildID + ")")
|
||||
} else {
|
||||
m.File = name
|
||||
continue mapping
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Replace executable filename/buildID with the overrides from source.
|
||||
// Assumes the executable is the first Mapping entry.
|
||||
if execName, buildID := s.ExecName, s.BuildID; execName != "" || buildID != "" {
|
||||
if len(p.Mapping) == 0 {
|
||||
// If there are no mappings, add a fake mapping to attempt symbolization.
|
||||
// This is useful for some profiles generated by the golang runtime, which
|
||||
// do not include any mappings. Symbolization with a fake mapping will only
|
||||
// be successful against a non-PIE binary.
|
||||
m := &profile.Mapping{ID: 1}
|
||||
p.Mapping = []*profile.Mapping{m}
|
||||
for _, l := range p.Location {
|
||||
l.Mapping = m
|
||||
}
|
||||
}
|
||||
m := p.Mapping[0]
|
||||
if execName != "" {
|
||||
m.File = execName
|
||||
}
|
||||
if buildID != "" {
|
||||
m.BuildID = buildID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fetch fetches a profile from source, within the timeout specified,
|
||||
// producing messages through the ui. It returns the profile and the
|
||||
// url of the actual source of the profile for remote profiles.
|
||||
func fetch(source string, duration, timeout time.Duration, ui plugin.UI) (p *profile.Profile, src string, err error) {
|
||||
var f io.ReadCloser
|
||||
|
||||
if sourceURL, timeout := adjustURL(source, duration, timeout); sourceURL != "" {
|
||||
ui.Print("Fetching profile over HTTP from " + sourceURL)
|
||||
if duration > 0 {
|
||||
ui.Print(fmt.Sprintf("Please wait... (%v)", duration))
|
||||
}
|
||||
f, err = fetchURL(sourceURL, timeout)
|
||||
src = sourceURL
|
||||
} else if isPerfFile(source) {
|
||||
f, err = convertPerfData(source, ui)
|
||||
} else {
|
||||
f, err = os.Open(source)
|
||||
}
|
||||
if err == nil {
|
||||
defer f.Close()
|
||||
p, err = profile.Parse(f)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// fetchURL fetches a profile from a URL using HTTP.
|
||||
func fetchURL(source string, timeout time.Duration) (io.ReadCloser, error) {
|
||||
resp, err := httpGet(source, timeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http fetch: %v", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
defer resp.Body.Close()
|
||||
return nil, statusCodeError(resp)
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
func statusCodeError(resp *http.Response) error {
|
||||
if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") {
|
||||
// error is from pprof endpoint
|
||||
if body, err := ioutil.ReadAll(resp.Body); err == nil {
|
||||
return fmt.Errorf("server response: %s - %s", resp.Status, body)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("server response: %s", resp.Status)
|
||||
}
|
||||
|
||||
// isPerfFile checks if a file is in perf.data format. It also returns false
|
||||
// if it encounters an error during the check.
|
||||
func isPerfFile(path string) bool {
|
||||
sourceFile, openErr := os.Open(path)
|
||||
if openErr != nil {
|
||||
return false
|
||||
}
|
||||
defer sourceFile.Close()
|
||||
|
||||
// If the file is the output of a perf record command, it should begin
|
||||
// with the string PERFILE2.
|
||||
perfHeader := []byte("PERFILE2")
|
||||
actualHeader := make([]byte, len(perfHeader))
|
||||
if _, readErr := sourceFile.Read(actualHeader); readErr != nil {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(actualHeader, perfHeader)
|
||||
}
|
||||
|
||||
// convertPerfData converts the file at path which should be in perf.data format
|
||||
// using the perf_to_profile tool and returns the file containing the
|
||||
// profile.proto formatted data.
|
||||
func convertPerfData(perfPath string, ui plugin.UI) (*os.File, error) {
|
||||
ui.Print(fmt.Sprintf(
|
||||
"Converting %s to a profile.proto... (May take a few minutes)",
|
||||
perfPath))
|
||||
profile, err := newTempFile(os.TempDir(), "pprof_", ".pb.gz")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deferDeleteTempFile(profile.Name())
|
||||
cmd := exec.Command("perf_to_profile", perfPath, profile.Name())
|
||||
if err := cmd.Run(); err != nil {
|
||||
profile.Close()
|
||||
return nil, fmt.Errorf("failed to convert perf.data file. Try github.com/google/perf_data_converter: %v", err)
|
||||
}
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
// adjustURL validates if a profile source is a URL and returns a
// cleaned-up URL and the timeout to use for retrieval over HTTP.
|
||||
// If the source cannot be recognized as a URL it returns an empty string.
|
||||
func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) {
|
||||
u, err := url.Parse(source)
|
||||
if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") {
|
||||
// Try adding http:// to catch sources of the form hostname:port/path.
|
||||
// url.Parse treats "hostname" as the scheme.
|
||||
u, err = url.Parse("http://" + source)
|
||||
}
|
||||
if err != nil || u.Host == "" {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// Apply duration/timeout overrides to URL.
|
||||
values := u.Query()
|
||||
if duration > 0 {
|
||||
values.Set("seconds", fmt.Sprint(int(duration.Seconds())))
|
||||
} else {
|
||||
if urlSeconds := values.Get("seconds"); urlSeconds != "" {
|
||||
if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil {
|
||||
duration = time.Duration(us) * time.Second
|
||||
}
|
||||
}
|
||||
}
|
||||
if timeout <= 0 {
|
||||
if duration > 0 {
|
||||
timeout = duration + duration/2
|
||||
} else {
|
||||
timeout = 60 * time.Second
|
||||
}
|
||||
}
|
||||
u.RawQuery = values.Encode()
|
||||
return u.String(), timeout
|
||||
}
|
||||
|
||||
// httpGet is a wrapper around http.Get; it is defined as a variable
|
||||
// so it can be redefined for testing.
|
||||
var httpGet = func(source string, timeout time.Duration) (*http.Response, error) {
|
||||
url, err := url.Parse(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tlsConfig *tls.Config
|
||||
if url.Scheme == "https+insecure" {
|
||||
tlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
url.Scheme = "https"
|
||||
source = url.String()
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
ResponseHeaderTimeout: timeout + 5*time.Second,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
return client.Get(source)
|
||||
}
|
228 src/cmd/vendor/github.com/google/pprof/internal/driver/fetch_test.go generated vendored Normal file
@ -0,0 +1,228 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/proftest"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
func TestSymbolizationPath(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("test assumes Unix paths")
|
||||
}
|
||||
|
||||
// Save environment variables to restore after test
|
||||
saveHome := os.Getenv("HOME")
|
||||
savePath := os.Getenv("PPROF_BINARY_PATH")
|
||||
|
||||
tempdir, err := ioutil.TempDir("", "home")
|
||||
if err != nil {
|
||||
t.Fatal("creating temp dir: ", err)
|
||||
}
|
||||
defer os.RemoveAll(tempdir)
|
||||
os.MkdirAll(filepath.Join(tempdir, "pprof", "binaries", "abcde10001"), 0700)
|
||||
os.Create(filepath.Join(tempdir, "pprof", "binaries", "abcde10001", "binary"))
|
||||
|
||||
obj := testObj{tempdir}
|
||||
os.Setenv("HOME", tempdir)
|
||||
for _, tc := range []struct {
|
||||
env, file, buildID, want string
|
||||
msgCount int
|
||||
}{
|
||||
{"", "/usr/bin/binary", "", "/usr/bin/binary", 0},
|
||||
{"", "/usr/bin/binary", "fedcb10000", "/usr/bin/binary", 0},
|
||||
{"", "/prod/path/binary", "abcde10001", filepath.Join(tempdir, "pprof/binaries/abcde10001/binary"), 0},
|
||||
{"/alternate/architecture", "/usr/bin/binary", "", "/alternate/architecture/binary", 0},
|
||||
{"/alternate/architecture", "/usr/bin/binary", "abcde10001", "/alternate/architecture/binary", 0},
|
||||
{"/nowhere:/alternate/architecture", "/usr/bin/binary", "fedcb10000", "/usr/bin/binary", 1},
|
||||
{"/nowhere:/alternate/architecture", "/usr/bin/binary", "abcde10002", "/usr/bin/binary", 1},
|
||||
} {
|
||||
os.Setenv("PPROF_BINARY_PATH", tc.env)
|
||||
p := &profile.Profile{
|
||||
Mapping: []*profile.Mapping{
|
||||
{
|
||||
File: tc.file,
|
||||
BuildID: tc.buildID,
|
||||
},
|
||||
},
|
||||
}
|
||||
s := &source{}
|
||||
locateBinaries(p, s, obj, &proftest.TestUI{t, tc.msgCount})
|
||||
if file := p.Mapping[0].File; file != tc.want {
|
||||
t.Errorf("%s:%s:%s, want %s, got %s", tc.env, tc.file, tc.buildID, tc.want, file)
|
||||
}
|
||||
}
|
||||
os.Setenv("HOME", saveHome)
|
||||
os.Setenv("PPROF_BINARY_PATH", savePath)
|
||||
}
|
||||
|
||||
func TestCollectMappingSources(t *testing.T) {
|
||||
const startAddress uint64 = 0x40000
|
||||
const url = "http://example.com"
|
||||
for _, tc := range []struct {
|
||||
file, buildID string
|
||||
want plugin.MappingSources
|
||||
}{
|
||||
{"/usr/bin/binary", "buildId", mappingSources("buildId", url, startAddress)},
|
||||
{"/usr/bin/binary", "", mappingSources("/usr/bin/binary", url, startAddress)},
|
||||
{"", "", mappingSources(url, url, startAddress)},
|
||||
} {
|
||||
p := &profile.Profile{
|
||||
Mapping: []*profile.Mapping{
|
||||
{
|
||||
File: tc.file,
|
||||
BuildID: tc.buildID,
|
||||
Start: startAddress,
|
||||
},
|
||||
},
|
||||
}
|
||||
got := collectMappingSources(p, url)
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Errorf("%s:%s, want %s, got %s", tc.file, tc.buildID, tc.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnsourceMappings(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
file, buildID, want string
|
||||
}{
|
||||
{"/usr/bin/binary", "buildId", "/usr/bin/binary"},
|
||||
{"http://example.com", "", ""},
|
||||
} {
|
||||
p := &profile.Profile{
|
||||
Mapping: []*profile.Mapping{
|
||||
{
|
||||
File: tc.file,
|
||||
BuildID: tc.buildID,
|
||||
},
|
||||
},
|
||||
}
|
||||
unsourceMappings(p)
|
||||
if got := p.Mapping[0].File; got != tc.want {
|
||||
t.Errorf("%s:%s, want %s, got %s", tc.file, tc.buildID, tc.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testObj struct {
|
||||
home string
|
||||
}
|
||||
|
||||
func (o testObj) Open(file string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
switch file {
|
||||
case "/alternate/architecture/binary":
|
||||
return testFile{file, "abcde10001"}, nil
|
||||
case "/usr/bin/binary":
|
||||
return testFile{file, "fedcb10000"}, nil
|
||||
case filepath.Join(o.home, "pprof/binaries/abcde10001/binary"):
|
||||
return testFile{file, "abcde10001"}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("not found: %s", file)
|
||||
}
|
||||
func (testObj) Demangler(_ string) func(names []string) (map[string]string, error) {
|
||||
return func(names []string) (map[string]string, error) { return nil, nil }
|
||||
}
|
||||
func (testObj) Disasm(file string, start, end uint64) ([]plugin.Inst, error) { return nil, nil }
|
||||
|
||||
type testFile struct{ name, buildID string }
|
||||
|
||||
func (f testFile) Name() string { return f.name }
|
||||
func (testFile) Base() uint64 { return 0 }
|
||||
func (f testFile) BuildID() string { return f.buildID }
|
||||
func (testFile) SourceLine(addr uint64) ([]plugin.Frame, error) { return nil, nil }
|
||||
func (testFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { return nil, nil }
|
||||
func (testFile) Close() error { return nil }
|
||||
|
||||
func TestFetch(t *testing.T) {
|
||||
const path = "testdata/"
|
||||
|
||||
// Intercept http.Get calls from HTTPFetcher.
|
||||
httpGet = stubHTTPGet
|
||||
|
||||
type testcase struct {
|
||||
source, execName string
|
||||
}
|
||||
|
||||
for _, tc := range []testcase{
|
||||
{path + "go.crc32.cpu", ""},
|
||||
{path + "go.nomappings.crash", "/bin/gotest.exe"},
|
||||
{"http://localhost/profile?file=cppbench.cpu", ""},
|
||||
} {
|
||||
p, _, _, err := grabProfile(&source{ExecName: tc.execName}, tc.source, 0, nil, testObj{}, &proftest.TestUI{t, 0})
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %s", tc.source, err)
|
||||
}
|
||||
if len(p.Sample) == 0 {
|
||||
t.Errorf("%s: want non-zero samples", tc.source)
|
||||
}
|
||||
if e := tc.execName; e != "" {
|
||||
switch {
|
||||
case len(p.Mapping) == 0 || p.Mapping[0] == nil:
|
||||
t.Errorf("%s: want mapping[0].execName == %s, got no mappings", tc.source, e)
|
||||
case p.Mapping[0].File != e:
|
||||
t.Errorf("%s: want mapping[0].execName == %s, got %s", tc.source, e, p.Mapping[0].File)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mappingSources creates MappingSources map with a single item.
|
||||
func mappingSources(key, source string, start uint64) plugin.MappingSources {
|
||||
return plugin.MappingSources{
|
||||
key: []struct {
|
||||
Source string
|
||||
Start uint64
|
||||
}{
|
||||
{Source: source, Start: start},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// stubHTTPGet intercepts a call to http.Get and rewrites it to use
|
||||
// "file://" to get the profile directly from a file.
|
||||
func stubHTTPGet(source string, _ time.Duration) (*http.Response, error) {
|
||||
url, err := url.Parse(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values := url.Query()
|
||||
file := values.Get("file")
|
||||
|
||||
if file == "" {
|
||||
return nil, fmt.Errorf("want .../file?profile, got %s", source)
|
||||
}
|
||||
|
||||
t := &http.Transport{}
|
||||
t.RegisterProtocol("file", http.NewFileTransport(http.Dir("testdata/")))
|
||||
|
||||
c := &http.Client{Transport: t}
|
||||
return c.Get("file:///" + file)
|
||||
}
|
430 src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go generated vendored Normal file
@ -0,0 +1,430 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/report"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
var commentStart = "//:" // Sentinel for comments on options
|
||||
var tailDigitsRE = regexp.MustCompile("[0-9]+$")
|
||||
|
||||
// interactive starts a shell to read pprof commands.
|
||||
func interactive(p *profile.Profile, o *plugin.Options) error {
|
||||
// Enter command processing loop.
|
||||
o.UI.SetAutoComplete(newCompleter(functionNames(p)))
|
||||
pprofVariables.set("compact_labels", "true")
|
||||
pprofVariables["sample_index"].help += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p))
|
||||
|
||||
// Do not wait for the visualizer to complete, to allow multiple
|
||||
// graphs to be visualized simultaneously.
|
||||
interactiveMode = true
|
||||
shortcuts := profileShortcuts(p)
|
||||
|
||||
greetings(p, o.UI)
|
||||
for {
|
||||
input, err := o.UI.ReadLine("(pprof) ")
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
if input == "" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, input := range shortcuts.expand(input) {
|
||||
// Process assignments of the form variable=value
|
||||
if s := strings.SplitN(input, "=", 2); len(s) > 0 {
|
||||
name := strings.TrimSpace(s[0])
|
||||
var value string
|
||||
if len(s) == 2 {
|
||||
value = s[1]
|
||||
if comment := strings.LastIndex(value, commentStart); comment != -1 {
|
||||
value = value[:comment]
|
||||
}
|
||||
value = strings.TrimSpace(value)
|
||||
}
|
||||
if v := pprofVariables[name]; v != nil {
|
||||
if name == "sample_index" {
|
||||
// Error check sample_index=xxx to ensure xxx is a valid sample type.
|
||||
index, err := p.SampleIndexByName(value)
|
||||
if err != nil {
|
||||
o.UI.PrintErr(err)
|
||||
continue
|
||||
}
|
||||
value = p.SampleType[index].Type
|
||||
}
|
||||
if err := pprofVariables.set(name, value); err != nil {
|
||||
o.UI.PrintErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Allow group=variable syntax by converting into variable="".
|
||||
if v := pprofVariables[value]; v != nil && v.group == name {
|
||||
if err := pprofVariables.set(value, ""); err != nil {
|
||||
o.UI.PrintErr(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
tokens := strings.Fields(input)
|
||||
if len(tokens) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
switch tokens[0] {
|
||||
case "o", "options":
|
||||
printCurrentOptions(p, o.UI)
|
||||
continue
|
||||
case "exit", "quit":
|
||||
return nil
|
||||
case "help":
|
||||
commandHelp(strings.Join(tokens[1:], " "), o.UI)
|
||||
continue
|
||||
}
|
||||
|
||||
args, vars, err := parseCommandLine(tokens)
|
||||
if err == nil {
|
||||
err = generateReportWrapper(p, args, vars, o)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
o.UI.PrintErr(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var generateReportWrapper = generateReport // For testing purposes.
|
||||
|
||||
// greetings prints a brief welcome and some overall profile
|
||||
// information before accepting interactive commands.
|
||||
func greetings(p *profile.Profile, ui plugin.UI) {
|
||||
ropt, err := reportOptions(p, pprofVariables)
|
||||
if err == nil {
|
||||
ui.Print(strings.Join(report.ProfileLabels(report.New(p, ropt)), "\n"))
|
||||
}
|
||||
ui.Print("Entering interactive mode (type \"help\" for commands, \"o\" for options)")
|
||||
}
|
||||
|
||||
// shortcuts represents composite commands that expand into a sequence
|
||||
// of other commands.
|
||||
type shortcuts map[string][]string
|
||||
|
||||
func (a shortcuts) expand(input string) []string {
|
||||
input = strings.TrimSpace(input)
|
||||
if a != nil {
|
||||
if r, ok := a[input]; ok {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return []string{input}
|
||||
}
|
||||
|
||||
var pprofShortcuts = shortcuts{
|
||||
":": []string{"focus=", "ignore=", "hide=", "tagfocus=", "tagignore="},
|
||||
}
|
||||
|
||||
// profileShortcuts creates macros for convenience and backward compatibility.
|
||||
func profileShortcuts(p *profile.Profile) shortcuts {
|
||||
s := pprofShortcuts
|
||||
// Add shortcuts for sample types
|
||||
for _, st := range p.SampleType {
|
||||
command := fmt.Sprintf("sample_index=%s", st.Type)
|
||||
s[st.Type] = []string{command}
|
||||
s["total_"+st.Type] = []string{"mean=0", command}
|
||||
s["mean_"+st.Type] = []string{"mean=1", command}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func sampleTypes(p *profile.Profile) []string {
|
||||
types := make([]string, len(p.SampleType))
|
||||
for i, t := range p.SampleType {
|
||||
types[i] = t.Type
|
||||
}
|
||||
return types
|
||||
}
|
||||
|
||||
func printCurrentOptions(p *profile.Profile, ui plugin.UI) {
|
||||
var args []string
|
||||
type groupInfo struct {
|
||||
set string
|
||||
values []string
|
||||
}
|
||||
groups := make(map[string]*groupInfo)
|
||||
for n, o := range pprofVariables {
|
||||
v := o.stringValue()
|
||||
comment := ""
|
||||
if g := o.group; g != "" {
|
||||
gi, ok := groups[g]
|
||||
if !ok {
|
||||
gi = &groupInfo{}
|
||||
groups[g] = gi
|
||||
}
|
||||
if o.boolValue() {
|
||||
gi.set = n
|
||||
}
|
||||
gi.values = append(gi.values, n)
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case n == "sample_index":
|
||||
st := sampleTypes(p)
|
||||
if v == "" {
|
||||
// Apply default (last sample index).
|
||||
v = st[len(st)-1]
|
||||
}
|
||||
// Add comments for all sample types in profile.
|
||||
comment = "[" + strings.Join(st, " | ") + "]"
|
||||
case n == "source_path":
|
||||
continue
|
||||
case n == "nodecount" && v == "-1":
|
||||
comment = "default"
|
||||
case v == "":
|
||||
// Add quotes for empty values.
|
||||
v = `""`
|
||||
}
|
||||
if comment != "" {
|
||||
comment = commentStart + " " + comment
|
||||
}
|
||||
args = append(args, fmt.Sprintf(" %-25s = %-20s %s", n, v, comment))
|
||||
}
|
||||
for g, vars := range groups {
|
||||
sort.Strings(vars.values)
|
||||
comment := commentStart + " [" + strings.Join(vars.values, " | ") + "]"
|
||||
args = append(args, fmt.Sprintf(" %-25s = %-20s %s", g, vars.set, comment))
|
||||
}
|
||||
sort.Strings(args)
|
||||
ui.Print(strings.Join(args, "\n"))
|
||||
}
|
||||
|
||||
// parseCommandLine parses a command and returns the pprof command to
|
||||
// execute and a set of variables for the report.
|
||||
func parseCommandLine(input []string) ([]string, variables, error) {
|
||||
cmd, args := input[:1], input[1:]
|
||||
name := cmd[0]
|
||||
|
||||
c := pprofCommands[name]
|
||||
if c == nil {
|
||||
// Attempt splitting digits on abbreviated commands (eg top10)
|
||||
if d := tailDigitsRE.FindString(name); d != "" && d != name {
|
||||
name = name[:len(name)-len(d)]
|
||||
cmd[0], args = name, append([]string{d}, args...)
|
||||
c = pprofCommands[name]
|
||||
}
|
||||
}
|
||||
if c == nil {
|
||||
return nil, nil, fmt.Errorf("Unrecognized command: %q", name)
|
||||
}
|
||||
|
||||
if c.hasParam {
|
||||
if len(args) == 0 {
|
||||
return nil, nil, fmt.Errorf("command %s requires an argument", name)
|
||||
}
|
||||
cmd = append(cmd, args[0])
|
||||
args = args[1:]
|
||||
}
|
||||
|
||||
// Copy the variables as options set in the command line are not persistent.
|
||||
vcopy := pprofVariables.makeCopy()
|
||||
|
||||
var focus, ignore string
|
||||
for i := 0; i < len(args); i++ {
|
||||
t := args[i]
|
||||
if _, err := strconv.ParseInt(t, 10, 32); err == nil {
|
||||
vcopy.set("nodecount", t)
|
||||
continue
|
||||
}
|
||||
switch t[0] {
|
||||
case '>':
|
||||
outputFile := t[1:]
|
||||
if outputFile == "" {
|
||||
i++
|
||||
if i >= len(args) {
|
||||
return nil, nil, fmt.Errorf("Unexpected end of line after >")
|
||||
}
|
||||
outputFile = args[i]
|
||||
}
|
||||
vcopy.set("output", outputFile)
|
||||
case '-':
|
||||
if t == "--cum" || t == "-cum" {
|
||||
vcopy.set("cum", "t")
|
||||
continue
|
||||
}
|
||||
ignore = catRegex(ignore, t[1:])
|
||||
default:
|
||||
focus = catRegex(focus, t)
|
||||
}
|
||||
}
|
||||
|
||||
if name == "tags" {
|
||||
updateFocusIgnore(vcopy, "tag", focus, ignore)
|
||||
} else {
|
||||
updateFocusIgnore(vcopy, "", focus, ignore)
|
||||
}
|
||||
|
||||
if vcopy["nodecount"].intValue() == -1 && (name == "text" || name == "top") {
|
||||
vcopy.set("nodecount", "10")
|
||||
}
|
||||
|
||||
return cmd, vcopy, nil
|
||||
}
|
||||
|
||||
func updateFocusIgnore(v variables, prefix, f, i string) {
|
||||
if f != "" {
|
||||
focus := prefix + "focus"
|
||||
v.set(focus, catRegex(v[focus].value, f))
|
||||
}
|
||||
|
||||
if i != "" {
|
||||
ignore := prefix + "ignore"
|
||||
v.set(ignore, catRegex(v[ignore].value, i))
|
||||
}
|
||||
}
|
||||
|
||||
func catRegex(a, b string) string {
|
||||
if a != "" && b != "" {
|
||||
return a + "|" + b
|
||||
}
|
||||
return a + b
|
||||
}
|
||||
|
||||
// commandHelp displays help and usage information for all Commands
|
||||
// and Variables or a specific Command or Variable.
|
||||
func commandHelp(args string, ui plugin.UI) {
|
||||
if args == "" {
|
||||
help := usage(false)
|
||||
help = help + `
|
||||
: Clear focus/ignore/hide/tagfocus/tagignore
|
||||
|
||||
type "help <cmd|option>" for more information
|
||||
`
|
||||
|
||||
ui.Print(help)
|
||||
return
|
||||
}
|
||||
|
||||
if c := pprofCommands[args]; c != nil {
|
||||
ui.Print(c.help(args))
|
||||
return
|
||||
}
|
||||
|
||||
if v := pprofVariables[args]; v != nil {
|
||||
ui.Print(v.help + "\n")
|
||||
return
|
||||
}
|
||||
|
||||
ui.PrintErr("Unknown command: " + args)
|
||||
}
|
||||
|
||||
// newCompleter creates an autocompletion function for a set of commands.
|
||||
func newCompleter(fns []string) func(string) string {
|
||||
return func(line string) string {
|
||||
v := pprofVariables
|
||||
switch tokens := strings.Fields(line); len(tokens) {
|
||||
case 0:
|
||||
// Nothing to complete
|
||||
case 1:
|
||||
// Single token -- complete command name
|
||||
if match := matchVariableOrCommand(v, tokens[0]); match != "" {
|
||||
return match
|
||||
}
|
||||
case 2:
|
||||
if tokens[0] == "help" {
|
||||
if match := matchVariableOrCommand(v, tokens[1]); match != "" {
|
||||
return tokens[0] + " " + match
|
||||
}
|
||||
return line
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
// Multiple tokens -- complete using functions, except for tags
|
||||
if cmd := pprofCommands[tokens[0]]; cmd != nil && tokens[0] != "tags" {
|
||||
lastTokenIdx := len(tokens) - 1
|
||||
lastToken := tokens[lastTokenIdx]
|
||||
if strings.HasPrefix(lastToken, "-") {
|
||||
lastToken = "-" + functionCompleter(lastToken[1:], fns)
|
||||
} else {
|
||||
lastToken = functionCompleter(lastToken, fns)
|
||||
}
|
||||
return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ")
|
||||
}
|
||||
}
|
||||
return line
|
||||
}
|
||||
}
|
||||
|
||||
// matchVariableOrCommand attempts to match a string token to the prefix of a Command or Variable.
|
||||
func matchVariableOrCommand(v variables, token string) string {
|
||||
token = strings.ToLower(token)
|
||||
found := ""
|
||||
for cmd := range pprofCommands {
|
||||
if strings.HasPrefix(cmd, token) {
|
||||
if found != "" {
|
||||
return ""
|
||||
}
|
||||
found = cmd
|
||||
}
|
||||
}
|
||||
for variable := range v {
|
||||
if strings.HasPrefix(variable, token) {
|
||||
if found != "" {
|
||||
return ""
|
||||
}
|
||||
found = variable
|
||||
}
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
// functionCompleter replaces provided substring with a function
|
||||
// name retrieved from a profile if a single match exists. Otherwise,
|
||||
// it returns unchanged substring. It defaults to no-op if the profile
|
||||
// is not specified.
|
||||
func functionCompleter(substring string, fns []string) string {
|
||||
found := ""
|
||||
for _, fName := range fns {
|
||||
if strings.Contains(fName, substring) {
|
||||
if found != "" {
|
||||
return substring
|
||||
}
|
||||
found = fName
|
||||
}
|
||||
}
|
||||
if found != "" {
|
||||
return found
|
||||
}
|
||||
return substring
|
||||
}
|
||||
|
||||
func functionNames(p *profile.Profile) []string {
|
||||
var fns []string
|
||||
for _, fn := range p.Function {
|
||||
fns = append(fns, fn.Name)
|
||||
}
|
||||
return fns
|
||||
}
|
325 src/cmd/vendor/github.com/google/pprof/internal/driver/interactive_test.go generated vendored Normal file
@ -0,0 +1,325 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/report"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
func TestShell(t *testing.T) {
|
||||
p := &profile.Profile{}
|
||||
generateReportWrapper = checkValue
|
||||
defer func() { generateReportWrapper = generateReport }()
|
||||
|
||||
// Use test commands and variables to exercise interactive processing
|
||||
var savedCommands commands
|
||||
savedCommands, pprofCommands = pprofCommands, testCommands
|
||||
defer func() { pprofCommands = savedCommands }()
|
||||
|
||||
savedVariables := pprofVariables
|
||||
defer func() { pprofVariables = savedVariables }()
|
||||
|
||||
// Random interleave of independent scripts
|
||||
pprofVariables = testVariables(savedVariables)
|
||||
o := setDefaults(nil)
|
||||
o.UI = newUI(t, interleave(script, 0))
|
||||
if err := interactive(p, o); err != nil {
|
||||
t.Error("first attempt:", err)
|
||||
}
|
||||
// Random interleave of independent scripts
|
||||
pprofVariables = testVariables(savedVariables)
|
||||
o.UI = newUI(t, interleave(script, 1))
|
||||
if err := interactive(p, o); err != nil {
|
||||
t.Error("second attempt:", err)
|
||||
}
|
||||
|
||||
// Random interleave of independent scripts with shortcuts
|
||||
pprofVariables = testVariables(savedVariables)
|
||||
var scScript []string
|
||||
pprofShortcuts, scScript = makeShortcuts(interleave(script, 2), 1)
|
||||
o.UI = newUI(t, scScript)
|
||||
if err := interactive(p, o); err != nil {
|
||||
t.Error("first shortcut attempt:", err)
|
||||
}
|
||||
|
||||
// Random interleave of independent scripts with shortcuts
|
||||
pprofVariables = testVariables(savedVariables)
|
||||
pprofShortcuts, scScript = makeShortcuts(interleave(script, 1), 2)
|
||||
o.UI = newUI(t, scScript)
|
||||
if err := interactive(p, o); err != nil {
|
||||
t.Error("second shortcut attempt:", err)
|
||||
}
|
||||
|
||||
// Verify propagation of IO errors
|
||||
pprofVariables = testVariables(savedVariables)
|
||||
o.UI = newUI(t, []string{"**error**"})
|
||||
if err := interactive(p, o); err == nil {
|
||||
t.Error("expected IO error, got nil")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var testCommands = commands{
|
||||
"check": &command{report.Raw, nil, nil, true, "", ""},
|
||||
}
|
||||
|
||||
func testVariables(base variables) variables {
|
||||
v := base.makeCopy()
|
||||
|
||||
v["b"] = &variable{boolKind, "f", "", ""}
|
||||
v["bb"] = &variable{boolKind, "f", "", ""}
|
||||
v["i"] = &variable{intKind, "0", "", ""}
|
||||
v["ii"] = &variable{intKind, "0", "", ""}
|
||||
v["f"] = &variable{floatKind, "0", "", ""}
|
||||
v["ff"] = &variable{floatKind, "0", "", ""}
|
||||
v["s"] = &variable{stringKind, "", "", ""}
|
||||
v["ss"] = &variable{stringKind, "", "", ""}
|
||||
|
||||
v["ta"] = &variable{boolKind, "f", "radio", ""}
|
||||
v["tb"] = &variable{boolKind, "f", "radio", ""}
|
||||
v["tc"] = &variable{boolKind, "t", "radio", ""}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// script contains sequences of commands to be executed for testing. Commands
|
||||
// are split by semicolon and interleaved randomly, so they must be
|
||||
// independent from each other.
|
||||
var script = []string{
|
||||
"bb=true;bb=false;check bb=false;bb=yes;check bb=true",
|
||||
"b=1;check b=true;b=n;check b=false",
|
||||
"i=-1;i=-2;check i=-2;i=999999;check i=999999",
|
||||
"check ii=0;ii=-1;check ii=-1;ii=100;check ii=100",
|
||||
"f=-1;f=-2.5;check f=-2.5;f=0.0001;check f=0.0001",
|
||||
"check ff=0;ff=-1.01;check ff=-1.01;ff=100;check ff=100",
|
||||
"s=one;s=two;check s=two",
|
||||
"ss=tree;check ss=tree;ss=;check ss;ss=forest;check ss=forest",
|
||||
"ta=true;check ta=true;check tb=false;check tc=false;tb=1;check tb=true;check ta=false;check tc=false;tc=yes;check tb=false;check ta=false;check tc=true",
|
||||
}
|
||||
|
||||
func makeShortcuts(input []string, seed int) (shortcuts, []string) {
|
||||
rand.Seed(int64(seed))
|
||||
|
||||
s := shortcuts{}
|
||||
var output, chunk []string
|
||||
for _, l := range input {
|
||||
chunk = append(chunk, l)
|
||||
switch rand.Intn(3) {
|
||||
case 0:
|
||||
// Create a macro for commands in 'chunk'.
|
||||
macro := fmt.Sprintf("alias%d", len(s))
|
||||
s[macro] = chunk
|
||||
output = append(output, macro)
|
||||
chunk = nil
|
||||
case 1:
|
||||
// Append commands in 'chunk' by themselves.
|
||||
output = append(output, chunk...)
|
||||
chunk = nil
|
||||
case 2:
|
||||
// Accumulate commands into 'chunk'
|
||||
}
|
||||
}
|
||||
output = append(output, chunk...)
|
||||
return s, output
|
||||
}
|
||||
|
||||
func newUI(t *testing.T, input []string) plugin.UI {
|
||||
return &testUI{
|
||||
t: t,
|
||||
input: input,
|
||||
}
|
||||
}
|
||||
|
||||
type testUI struct {
|
||||
t *testing.T
|
||||
input []string
|
||||
index int
|
||||
}
|
||||
|
||||
func (ui *testUI) ReadLine(_ string) (string, error) {
|
||||
if ui.index >= len(ui.input) {
|
||||
return "", io.EOF
|
||||
}
|
||||
input := ui.input[ui.index]
|
||||
if input == "**error**" {
|
||||
return "", fmt.Errorf("Error: %s", input)
|
||||
}
|
||||
ui.index++
|
||||
return input, nil
|
||||
}
|
||||
|
||||
func (ui *testUI) Print(args ...interface{}) {
|
||||
}
|
||||
|
||||
func (ui *testUI) PrintErr(args ...interface{}) {
|
||||
output := fmt.Sprint(args)
|
||||
if output != "" {
|
||||
ui.t.Error(output)
|
||||
}
|
||||
}
|
||||
|
||||
func (ui *testUI) IsTerminal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (ui *testUI) SetAutoComplete(func(string) string) {
|
||||
}
|
||||
|
||||
func checkValue(p *profile.Profile, cmd []string, vars variables, o *plugin.Options) error {
|
||||
if len(cmd) != 2 {
|
||||
return fmt.Errorf("expected len(cmd)==2, got %v", cmd)
|
||||
}
|
||||
|
||||
input := cmd[1]
|
||||
args := strings.SplitN(input, "=", 2)
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("unexpected empty input")
|
||||
}
|
||||
name, value := args[0], ""
|
||||
if len(args) == 2 {
|
||||
value = args[1]
|
||||
}
|
||||
|
||||
gotv := vars[name]
|
||||
if gotv == nil {
|
||||
return fmt.Errorf("Could not find variable named %s", name)
|
||||
}
|
||||
|
||||
if got := gotv.stringValue(); got != value {
|
||||
return fmt.Errorf("Variable %s, want %s, got %s", name, value, got)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func interleave(input []string, seed int) []string {
|
||||
var inputs [][]string
|
||||
for _, s := range input {
|
||||
inputs = append(inputs, strings.Split(s, ";"))
|
||||
}
|
||||
rand.Seed(int64(seed))
|
||||
var output []string
|
||||
for len(inputs) > 0 {
|
||||
next := rand.Intn(len(inputs))
|
||||
output = append(output, inputs[next][0])
|
||||
if tail := inputs[next][1:]; len(tail) > 0 {
|
||||
inputs[next] = tail
|
||||
} else {
|
||||
inputs = append(inputs[:next], inputs[next+1:]...)
|
||||
}
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func TestInteractiveCommands(t *testing.T) {
|
||||
type interactiveTestcase struct {
|
||||
input string
|
||||
want map[string]string
|
||||
}
|
||||
|
||||
testcases := []interactiveTestcase{
|
||||
{
|
||||
"top 10 --cum focus1 -ignore focus2",
|
||||
map[string]string{
|
||||
"functions": "true",
|
||||
"nodecount": "10",
|
||||
"cum": "true",
|
||||
"focus": "focus1|focus2",
|
||||
"ignore": "ignore",
|
||||
},
|
||||
},
|
||||
{
|
||||
"top10 --cum focus1 -ignore focus2",
|
||||
map[string]string{
|
||||
"functions": "true",
|
||||
"nodecount": "10",
|
||||
"cum": "true",
|
||||
"focus": "focus1|focus2",
|
||||
"ignore": "ignore",
|
||||
},
|
||||
},
|
||||
{
|
||||
"dot",
|
||||
map[string]string{
|
||||
"functions": "true",
|
||||
"nodecount": "80",
|
||||
"cum": "false",
|
||||
},
|
||||
},
|
||||
{
|
||||
"tags -ignore1 -ignore2 focus1 >out",
|
||||
map[string]string{
|
||||
"functions": "true",
|
||||
"nodecount": "80",
|
||||
"cum": "false",
|
||||
"output": "out",
|
||||
"tagfocus": "focus1",
|
||||
"tagignore": "ignore1|ignore2",
|
||||
},
|
||||
},
|
||||
{
|
||||
"weblist find -test",
|
||||
map[string]string{
|
||||
"functions": "false",
|
||||
"addressnoinlines": "true",
|
||||
"nodecount": "0",
|
||||
"cum": "false",
|
||||
"flat": "true",
|
||||
"ignore": "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
"callgrind fun -ignore >out",
|
||||
map[string]string{
|
||||
"functions": "false",
|
||||
"addresses": "true",
|
||||
"nodecount": "0",
|
||||
"cum": "false",
|
||||
"flat": "true",
|
||||
"output": "out",
|
||||
},
|
||||
},
|
||||
{
|
||||
"999",
|
||||
nil, // Error
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
cmd, vars, err := parseCommandLine(strings.Fields(tc.input))
|
||||
if tc.want == nil && err != nil {
|
||||
// Error expected
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("failed on %q: %v", tc.input, err)
|
||||
continue
|
||||
}
|
||||
vars = applyCommandOverrides(cmd, vars)
|
||||
|
||||
for n, want := range tc.want {
|
||||
if got := vars[n].stringValue(); got != want {
|
||||
t.Errorf("failed on %q, cmd=%q, %s got %s, want %s", tc.input, cmd, n, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
148 src/cmd/vendor/github.com/google/pprof/internal/driver/options.go generated vendored Normal file
@ -0,0 +1,148 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/binutils"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/symbolizer"
|
||||
)
|
||||
|
||||
// setDefaults returns a new plugin.Options with zero fields set to
|
||||
// sensible defaults.
|
||||
func setDefaults(o *plugin.Options) *plugin.Options {
|
||||
d := &plugin.Options{}
|
||||
if o != nil {
|
||||
*d = *o
|
||||
}
|
||||
if d.Writer == nil {
|
||||
d.Writer = oswriter{}
|
||||
}
|
||||
if d.Flagset == nil {
|
||||
d.Flagset = goFlags{}
|
||||
}
|
||||
if d.Obj == nil {
|
||||
d.Obj = &binutils.Binutils{}
|
||||
}
|
||||
if d.UI == nil {
|
||||
d.UI = &stdUI{r: bufio.NewReader(os.Stdin)}
|
||||
}
|
||||
if d.Sym == nil {
|
||||
d.Sym = &symbolizer.Symbolizer{d.Obj, d.UI}
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// goFlags returns a flagset implementation based on the standard flag
|
||||
// package from the Go distribution. It implements the plugin.FlagSet
|
||||
// interface.
|
||||
type goFlags struct{}
|
||||
|
||||
func (goFlags) Bool(o string, d bool, c string) *bool {
|
||||
return flag.Bool(o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) Int(o string, d int, c string) *int {
|
||||
return flag.Int(o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) Float64(o string, d float64, c string) *float64 {
|
||||
return flag.Float64(o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) String(o, d, c string) *string {
|
||||
return flag.String(o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) BoolVar(b *bool, o string, d bool, c string) {
|
||||
flag.BoolVar(b, o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) IntVar(i *int, o string, d int, c string) {
|
||||
flag.IntVar(i, o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) Float64Var(f *float64, o string, d float64, c string) {
|
||||
flag.Float64Var(f, o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) StringVar(s *string, o, d, c string) {
|
||||
flag.StringVar(s, o, d, c)
|
||||
}
|
||||
|
||||
func (goFlags) StringList(o, d, c string) *[]*string {
|
||||
return &[]*string{flag.String(o, d, c)}
|
||||
}
|
||||
|
||||
func (goFlags) ExtraUsage() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (goFlags) Parse(usage func()) []string {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) == 0 {
|
||||
usage()
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
type stdUI struct {
|
||||
r *bufio.Reader
|
||||
}
|
||||
|
||||
func (ui *stdUI) ReadLine(prompt string) (string, error) {
|
||||
os.Stdout.WriteString(prompt)
|
||||
return ui.r.ReadString('\n')
|
||||
}
|
||||
|
||||
func (ui *stdUI) Print(args ...interface{}) {
|
||||
ui.fprint(os.Stderr, args)
|
||||
}
|
||||
|
||||
func (ui *stdUI) PrintErr(args ...interface{}) {
|
||||
ui.fprint(os.Stderr, args)
|
||||
}
|
||||
|
||||
func (ui *stdUI) IsTerminal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (ui *stdUI) SetAutoComplete(func(string) string) {
|
||||
}
|
||||
|
||||
func (ui *stdUI) fprint(f *os.File, args []interface{}) {
|
||||
text := fmt.Sprint(args...)
|
||||
if !strings.HasSuffix(text, "\n") {
|
||||
text += "\n"
|
||||
}
|
||||
f.WriteString(text)
|
||||
}
|
||||
|
||||
// oswriter implements the Writer interface using a regular file.
|
||||
type oswriter struct{}
|
||||
|
||||
func (oswriter) Open(name string) (io.WriteCloser, error) {
|
||||
f, err := os.Create(name)
|
||||
return f, err
|
||||
}
|
54 src/cmd/vendor/github.com/google/pprof/internal/driver/tempfile.go generated vendored Normal file
@ -0,0 +1,54 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// newTempFile returns a new output file in dir with the provided prefix and suffix.
|
||||
func newTempFile(dir, prefix, suffix string) (*os.File, error) {
|
||||
for index := 1; index < 10000; index++ {
|
||||
path := filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix))
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return os.Create(path)
|
||||
}
|
||||
}
|
||||
// Give up
|
||||
return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix)
|
||||
}
|
||||
|
||||
var tempFiles []string
|
||||
var tempFilesMu = sync.Mutex{}
|
||||
|
||||
// deferDeleteTempFile marks a file to be deleted by next call to Cleanup()
|
||||
func deferDeleteTempFile(path string) {
|
||||
tempFilesMu.Lock()
|
||||
tempFiles = append(tempFiles, path)
|
||||
tempFilesMu.Unlock()
|
||||
}
|
||||
|
||||
// cleanupTempFiles removes any temporary files selected for deferred cleaning.
|
||||
func cleanupTempFiles() {
|
||||
tempFilesMu.Lock()
|
||||
for _, f := range tempFiles {
|
||||
os.Remove(f)
|
||||
}
|
||||
tempFiles = nil
|
||||
tempFilesMu.Unlock()
|
||||
}
|
BIN src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/cppbench.cpu generated vendored Normal file
Binary file not shown.
17 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/file1000.src generated vendored Normal file
@ -0,0 +1,17 @@
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
line6
|
||||
line7
|
||||
line8
|
||||
line9
|
||||
line0
|
||||
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
|
||||
|
17 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/file2000.src generated vendored Normal file
@ -0,0 +1,17 @@
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
line6
|
||||
line7
|
||||
line8
|
||||
line9
|
||||
line0
|
||||
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
|
||||
|
17 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/file3000.src generated vendored Normal file
@ -0,0 +1,17 @@
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
line6
|
||||
line7
|
||||
line8
|
||||
line9
|
||||
line0
|
||||
line1
|
||||
line2
|
||||
line3
|
||||
line4
|
||||
line5
|
||||
|
||||
|
BIN src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/go.crc32.cpu generated vendored Normal file
Binary file not shown.
BIN src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/go.nomappings.crash generated vendored Normal file
Binary file not shown.
10 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.contention.cum.files.dot generated vendored Normal file
@ -0,0 +1,10 @@
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid-contention" [shape=box fontsize=16 label="Build ID: buildid-contention\lComment #1\lComment #2\lType: delay\lShowing nodes accounting for 149.50ms, 100% of 149.50ms total\l"] }
|
||||
N1 [label="file3000.src\n32.77ms (21.92%)\nof 149.50ms (100%)" fontsize=20 shape=box tooltip="testdata/file3000.src (149.50ms)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N2 [label="file1000.src\n51.20ms (34.25%)" fontsize=23 shape=box tooltip="testdata/file1000.src (51.20ms)" color="#b23100" fillcolor="#eddbd5"]
|
||||
N3 [label="file2000.src\n65.54ms (43.84%)\nof 75.78ms (50.68%)" fontsize=24 shape=box tooltip="testdata/file2000.src (75.78ms)" color="#b22000" fillcolor="#edd9d5"]
|
||||
N1 -> N3 [label=" 75.78ms" weight=51 penwidth=3 color="#b22000" tooltip="testdata/file3000.src -> testdata/file2000.src (75.78ms)" labeltooltip="testdata/file3000.src -> testdata/file2000.src (75.78ms)"]
|
||||
N1 -> N2 [label=" 40.96ms" weight=28 penwidth=2 color="#b23900" tooltip="testdata/file3000.src -> testdata/file1000.src (40.96ms)" labeltooltip="testdata/file3000.src -> testdata/file1000.src (40.96ms)"]
|
||||
N3 -> N2 [label=" 10.24ms" weight=7 color="#b29775" tooltip="testdata/file2000.src -> testdata/file1000.src (10.24ms)" labeltooltip="testdata/file2000.src -> testdata/file1000.src (10.24ms)"]
|
||||
}
|
9 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.contention.flat.addresses.dot.focus.ignore generated vendored Normal file
@ -0,0 +1,9 @@
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid-contention" [shape=box fontsize=16 label="Build ID: buildid-contention\lComment #1\lComment #2\lType: delay\lShowing nodes accounting for 40.96ms, 27.40% of 149.50ms total\l"] }
|
||||
N1 [label="0000000000001000\nline1000\nfile1000.src:1\n40.96ms (27.40%)" fontsize=24 shape=box tooltip="0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)" color="#b23900" fillcolor="#edddd5"]
|
||||
N2 [label="0000000000003001\nline3000\nfile3000.src:5\n0 of 40.96ms (27.40%)" fontsize=8 shape=box tooltip="0000000000003001 line3000 testdata/file3000.src:5 (40.96ms)" color="#b23900" fillcolor="#edddd5"]
|
||||
N3 [label="0000000000003001\nline3001\nfile3000.src:3\n0 of 40.96ms (27.40%)" fontsize=8 shape=box tooltip="0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)" color="#b23900" fillcolor="#edddd5"]
|
||||
N2 -> N3 [label=" 40.96ms\n (inline)" weight=28 penwidth=2 color="#b23900" tooltip="0000000000003001 line3000 testdata/file3000.src:5 -> 0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)" labeltooltip="0000000000003001 line3000 testdata/file3000.src:5 -> 0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)"]
|
||||
N3 -> N1 [label=" 40.96ms" weight=28 penwidth=2 color="#b23900" tooltip="0000000000003001 line3001 testdata/file3000.src:3 -> 0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)" labeltooltip="0000000000003001 line3001 testdata/file3000.src:3 -> 0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)"]
|
||||
}
|
88 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.callgrind generated vendored Normal file
@ -0,0 +1,88 @@
positions: instr line
|
||||
events: cpu(ms)
|
||||
|
||||
ob=(1) /path/to/testbinary
|
||||
fl=(1) testdata/file1000.src
|
||||
fn=(1) line1000
|
||||
0x1000 1 1100
|
||||
|
||||
ob=(1)
|
||||
fl=(2) testdata/file2000.src
|
||||
fn=(2) line2001
|
||||
+4096 9 10
|
||||
cfl=(1)
|
||||
cfn=(1)
|
||||
calls=0 * 1
|
||||
* * 1000
|
||||
|
||||
ob=(1)
|
||||
fl=(3) testdata/file3000.src
|
||||
fn=(3) line3002
|
||||
+4096 2 10
|
||||
cfl=(2)
|
||||
cfn=(4) line2000
|
||||
calls=0 * 4
|
||||
* * 1000
|
||||
|
||||
ob=(1)
|
||||
fl=(2)
|
||||
fn=(4)
|
||||
-4096 4 0
|
||||
cfl=(2)
|
||||
cfn=(2)
|
||||
calls=0 -4096 9
|
||||
* * 1010
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(5) line3000
|
||||
+4096 6 0
|
||||
cfl=(3)
|
||||
cfn=(6) line3001
|
||||
calls=0 +4096 5
|
||||
* * 1010
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(6)
|
||||
* 5 0
|
||||
cfl=(3)
|
||||
cfn=(3)
|
||||
calls=0 * 2
|
||||
* * 1010
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(5)
|
||||
+1 9 0
|
||||
cfl=(3)
|
||||
cfn=(6)
|
||||
calls=0 +1 8
|
||||
* * 100
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(6)
|
||||
* 8 0
|
||||
cfl=(1)
|
||||
cfn=(1)
|
||||
calls=0 -8193 1
|
||||
* * 100
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(5)
|
||||
+1 9 0
|
||||
cfl=(3)
|
||||
cfn=(3)
|
||||
calls=0 +1 5
|
||||
* * 10
|
||||
|
||||
ob=(1)
|
||||
fl=(3)
|
||||
fn=(3)
|
||||
* 5 0
|
||||
cfl=(2)
|
||||
cfn=(4)
|
||||
calls=0 -4098 4
|
||||
* * 10
|
0 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.comments generated vendored Normal file
5 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.cum.lines.text.hide generated vendored Normal file
@ -0,0 +1,5 @@
Showing nodes accounting for 1.11s, 99.11% of 1.12s total
|
||||
flat flat% sum% cum cum%
|
||||
1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src:1
|
||||
0 0% 98.21% 1.01s 90.18% line2000 testdata/file2000.src:4
|
||||
0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src:9 (inline)
|
5 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.cum.lines.text.show generated vendored Normal file
@ -0,0 +1,5 @@
Showing nodes accounting for 1.11s, 99.11% of 1.12s total
|
||||
flat flat% sum% cum cum%
|
||||
1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src:1
|
||||
0 0% 98.21% 1.01s 90.18% line2000 testdata/file2000.src:4
|
||||
0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src:9 (inline)
|
3 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.cum.lines.topproto.hide generated vendored Normal file
@ -0,0 +1,3 @@
Showing nodes accounting for 1s, 100% of 1s total
|
||||
flat flat% sum% cum cum%
|
||||
1s 100% 100% 1s 100% mangled1000 testdata/file1000.src:1
|
14 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.addresses.disasm generated vendored Normal file
@ -0,0 +1,14 @@
Total: 1.12s
|
||||
ROUTINE ======================== line1000
|
||||
1.10s 1.10s (flat, cum) 98.21% of Total
|
||||
1.10s 1.10s 1000: instruction one ;line1000 file1000.src:1
|
||||
. . 1001: instruction two
|
||||
. . 1002: instruction three
|
||||
. . 1003: instruction four
|
||||
ROUTINE ======================== line3000
|
||||
10ms 1.12s (flat, cum) 100% of Total
|
||||
10ms 1.01s 3000: instruction one ;line3000 file3000.src:6
|
||||
. 100ms 3001: instruction two ;line3000 file3000.src:9
|
||||
. 10ms 3002: instruction three
|
||||
. . 3003: instruction four
|
||||
. . 3004: instruction five
|
109 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.addresses.weblist generated vendored Normal file
@ -0,0 +1,109 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Pprof listing</title>
|
||||
<style type="text/css">
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
}
|
||||
h1 {
|
||||
font-size: 1.5em;
|
||||
margin-bottom: 4px;
|
||||
}
|
||||
.legend {
|
||||
font-size: 1.25em;
|
||||
}
|
||||
.line {
|
||||
color: #aaaaaa;
|
||||
}
|
||||
.nop {
|
||||
color: #aaaaaa;
|
||||
}
|
||||
.unimportant {
|
||||
color: #cccccc;
|
||||
}
|
||||
.disasmloc {
|
||||
color: #000000;
|
||||
}
|
||||
.deadsrc {
|
||||
cursor: pointer;
|
||||
}
|
||||
.deadsrc:hover {
|
||||
background-color: #eeeeee;
|
||||
}
|
||||
.livesrc {
|
||||
color: #0000ff;
|
||||
cursor: pointer;
|
||||
}
|
||||
.livesrc:hover {
|
||||
background-color: #eeeeee;
|
||||
}
|
||||
.asm {
|
||||
color: #008800;
|
||||
display: none;
|
||||
}
|
||||
</style>
|
||||
<script type="text/javascript">
|
||||
function pprof_toggle_asm(e) {
|
||||
var target;
|
||||
if (!e) e = window.event;
|
||||
if (e.target) target = e.target;
|
||||
else if (e.srcElement) target = e.srcElement;
|
||||
|
||||
if (target) {
|
||||
var asm = target.nextSibling;
|
||||
if (asm && asm.className == "asm") {
|
||||
asm.style.display = (asm.style.display == "block" ? "" : "block");
|
||||
e.preventDefault();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<div class="legend">File: testbinary<br>
|
||||
Type: cpu<br>
|
||||
Duration: 10s, Total samples = 1.12s (11.20%)<br>Total: 1.12s</div><h1>line1000</h1>testdata/file1000.src
|
||||
<pre onClick="pprof_toggle_asm(event)">
|
||||
Total: 1.10s 1.10s (flat, cum) 98.21%
|
||||
<span class=line> 1</span> <span class=deadsrc> 1.10s 1.10s line1 </span><span class=asm> 1.10s 1.10s 1000: instruction one <span class=disasmloc>file1000.src:1</span>
|
||||
. . 1001: instruction two <span class=disasmloc></span>
|
||||
. . 1002: instruction three <span class=disasmloc></span>
|
||||
. . 1003: instruction four <span class=disasmloc></span>
|
||||
</span>
|
||||
<span class=line> 2</span> <span class=nop> . . line2 </span>
|
||||
<span class=line> 3</span> <span class=nop> . . line3 </span>
|
||||
<span class=line> 4</span> <span class=nop> . . line4 </span>
|
||||
<span class=line> 5</span> <span class=nop> . . line5 </span>
|
||||
<span class=line> 6</span> <span class=nop> . . line6 </span>
|
||||
</pre>
|
||||
<h1>line3000</h1>testdata/file3000.src
|
||||
<pre onClick="pprof_toggle_asm(event)">
|
||||
Total: 10ms 1.12s (flat, cum) 100%
|
||||
<span class=line> 1</span> <span class=nop> . . line1 </span>
|
||||
<span class=line> 2</span> <span class=nop> . . line2 </span>
|
||||
<span class=line> 3</span> <span class=nop> . . line3 </span>
|
||||
<span class=line> 4</span> <span class=nop> . . line4 </span>
|
||||
<span class=line> 5</span> <span class=nop> . . line5 </span>
|
||||
<span class=line> 6</span> <span class=deadsrc> 10ms 1.01s line6 </span><span class=asm> 10ms 1.01s 3000: instruction one <span class=disasmloc>file3000.src:6</span>
|
||||
</span>
|
||||
<span class=line> 7</span> <span class=nop> . . line7 </span>
|
||||
<span class=line> 8</span> <span class=nop> . . line8 </span>
|
||||
<span class=line> 9</span> <span class=deadsrc> . 110ms line9 </span><span class=asm> . 100ms 3001: instruction two <span class=disasmloc>file3000.src:9</span>
|
||||
. 10ms 3002: instruction three <span class=disasmloc>file3000.src:9</span>
|
||||
. . 3003: instruction four <span class=disasmloc></span>
|
||||
. . 3004: instruction five <span class=disasmloc></span>
|
||||
</span>
|
||||
<span class=line> 10</span> <span class=nop> . . line0 </span>
|
||||
<span class=line> 11</span> <span class=nop> . . line1 </span>
|
||||
<span class=line> 12</span> <span class=nop> . . line2 </span>
|
||||
<span class=line> 13</span> <span class=nop> . . line3 </span>
|
||||
<span class=line> 14</span> <span class=nop> . . line4 </span>
|
||||
</pre>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
20 src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.functions.dot generated vendored Normal file
@ -0,0 +1,20 @@
digraph "testbinary" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "File: testbinary" [shape=box fontsize=16 label="File: testbinary\lType: cpu\lDuration: 10s, Total samples = 1.12s (11.20%)\lShowing nodes accounting for 1.12s, 100% of 1.12s total\l"] }
|
||||
N1 [label="line1000\nfile1000.src\n1.10s (98.21%)" fontsize=24 shape=box tooltip="line1000 testdata/file1000.src (1.10s)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N1_0 [label = "key1:tag1\nkey2:tag1" fontsize=8 shape=box3d tooltip="1s"]
|
||||
N1 -> N1_0 [label=" 1s" weight=100 tooltip="1s" labeltooltip="1s"]
|
||||
N1_1 [label = "key1:tag2\nkey3:tag2" fontsize=8 shape=box3d tooltip="0.10s"]
|
||||
N1 -> N1_1 [label=" 0.10s" weight=100 tooltip="0.10s" labeltooltip="0.10s"]
|
||||
N2 [label="line3000\nfile3000.src\n0 of 1.12s (100%)" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src (1.12s)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N3 [label="line3001\nfile3000.src\n0 of 1.11s (99.11%)" fontsize=8 shape=box tooltip="line3001 testdata/file3000.src (1.11s)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N4 [label="line3002\nfile3000.src\n0.01s (0.89%)\nof 1.02s (91.07%)" fontsize=10 shape=box tooltip="line3002 testdata/file3000.src (1.02s)" color="#b20400" fillcolor="#edd6d5"]
|
||||
N5 [label="line2001\nfile2000.src\n0.01s (0.89%)\nof 1.01s (90.18%)" fontsize=10 shape=box tooltip="line2001 testdata/file2000.src (1.01s)" color="#b20500" fillcolor="#edd6d5"]
|
||||
N6 [label="line2000\nfile2000.src\n0 of 1.01s (90.18%)" fontsize=8 shape=box tooltip="line2000 testdata/file2000.src (1.01s)" color="#b20500" fillcolor="#edd6d5"]
|
||||
N2 -> N3 [label=" 1.11s\n (inline)" weight=100 penwidth=5 color="#b20000" tooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (1.11s)" labeltooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (1.11s)"]
|
||||
N6 -> N5 [label=" 1.01s\n (inline)" weight=91 penwidth=5 color="#b20500" tooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (1.01s)" labeltooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (1.01s)"]
|
||||
N3 -> N4 [label=" 1.01s\n (inline)" weight=91 penwidth=5 color="#b20500" tooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (1.01s)" labeltooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (1.01s)"]
|
||||
N4 -> N6 [label=" 1.01s" weight=91 penwidth=5 color="#b20500" tooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (1.01s)" labeltooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (1.01s)"]
|
||||
N5 -> N1 [label=" 1s" weight=90 penwidth=5 color="#b20500" tooltip="line2001 testdata/file2000.src -> line1000 testdata/file1000.src (1s)" labeltooltip="line2001 testdata/file2000.src -> line1000 testdata/file1000.src (1s)"]
|
||||
N3 -> N1 [label=" 0.10s" weight=9 color="#b28b62" tooltip="line3001 testdata/file3000.src -> line1000 testdata/file1000.src (0.10s)" labeltooltip="line3001 testdata/file3000.src -> line1000 testdata/file1000.src (0.10s)"]
|
||||
}
|
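The golden file above is ordinary Graphviz DOT, so it can be rendered locally as a quick sanity check. A minimal sketch, assuming Graphviz's dot binary is installed and the golden file has been saved next to the program as pprof.cpu.flat.functions.dot (both assumptions, not part of this change):

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Render the DOT golden file to SVG with Graphviz; "dot -Tsvg" is the
	// standard Graphviz invocation, and the file names here are only examples.
	cmd := exec.Command("dot", "-Tsvg", "-o", "cpu.svg", "pprof.cpu.flat.functions.dot")
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("rendering DOT failed: %v", err)
	}
}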
8
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.functions.text
generated
vendored
Normal file
|
@@ -0,0 +1,8 @@
|
|||
Showing nodes accounting for 1.12s, 100% of 1.12s total
|
||||
flat flat% sum% cum cum%
|
||||
1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src
|
||||
0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src (inline)
|
||||
0.01s 0.89% 100% 1.02s 91.07% line3002 testdata/file3000.src (inline)
|
||||
0 0% 100% 1.01s 90.18% line2000 testdata/file2000.src
|
||||
0 0% 100% 1.12s 100% line3000 testdata/file3000.src
|
||||
0 0% 100% 1.11s 99.11% line3001 testdata/file3000.src (inline)
|
13
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.peek
generated
vendored
Normal file
|
@@ -0,0 +1,13 @@
|
|||
Showing nodes accounting for 1.12s, 100% of 1.12s total
|
||||
----------------------------------------------------------+-------------
|
||||
flat flat% sum% cum cum% calls calls% + context
|
||||
----------------------------------------------------------+-------------
|
||||
1.01s 100% | line2000 testdata/file2000.src (inline)
|
||||
0.01s 0.89% 0.89% 1.01s 90.18% | line2001 testdata/file2000.src
|
||||
1s 99.01% | line1000 testdata/file1000.src
|
||||
----------------------------------------------------------+-------------
|
||||
1.11s 100% | line3000 testdata/file3000.src (inline)
|
||||
0 0% 0.89% 1.11s 99.11% | line3001 testdata/file3000.src
|
||||
1.01s 90.99% | line3002 testdata/file3000.src (inline)
|
||||
0.10s 9.01% | line1000 testdata/file1000.src
|
||||
----------------------------------------------------------+-------------
|
13
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.tags
generated
vendored
Normal file
|
@@ -0,0 +1,13 @@
|
|||
key1: Total 1120
|
||||
1000 (89.29%): tag1
|
||||
100 ( 8.93%): tag2
|
||||
10 ( 0.89%): tag3
|
||||
10 ( 0.89%): tag4
|
||||
|
||||
key2: Total 1020
|
||||
1010 (99.02%): tag1
|
||||
10 ( 0.98%): tag2
|
||||
|
||||
key3: Total 100
|
||||
100 ( 100%): tag2
|
||||
|
6
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.tags.focus.ignore
generated
vendored
Normal file
|
@@ -0,0 +1,6 @@
|
|||
key1: Total 100
|
||||
100 ( 100%): tag2
|
||||
|
||||
key3: Total 100
|
||||
100 ( 100%): tag2
|
||||
|
32
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.traces
generated
vendored
Normal file
|
@@ -0,0 +1,32 @@
|
|||
File: testbinary
|
||||
Type: cpu
|
||||
Duration: 10s, Total samples = 1.12s (11.20%)
|
||||
-----------+-------------------------------------------------------
|
||||
key1: tag1
|
||||
key2: tag1
|
||||
1s line1000 testdata/file1000.src
|
||||
line2001 testdata/file2000.src
|
||||
line2000 testdata/file2000.src
|
||||
line3002 testdata/file3000.src
|
||||
line3001 testdata/file3000.src
|
||||
line3000 testdata/file3000.src
|
||||
-----------+-------------------------------------------------------
|
||||
key1: tag2
|
||||
key3: tag2
|
||||
100ms line1000 testdata/file1000.src
|
||||
line3001 testdata/file3000.src
|
||||
line3000 testdata/file3000.src
|
||||
-----------+-------------------------------------------------------
|
||||
key1: tag3
|
||||
key2: tag2
|
||||
10ms line2001 testdata/file2000.src
|
||||
line2000 testdata/file2000.src
|
||||
line3002 testdata/file3000.src
|
||||
line3000 testdata/file3000.src
|
||||
-----------+-------------------------------------------------------
|
||||
key1: tag4
|
||||
key2: tag1
|
||||
10ms line3002 testdata/file3000.src
|
||||
line3001 testdata/file3000.src
|
||||
line3000 testdata/file3000.src
|
||||
-----------+-------------------------------------------------------
|
17
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpusmall.flat.addresses.tree
generated
vendored
Normal file
|
@@ -0,0 +1,17 @@
|
|||
Showing nodes accounting for 4s, 100% of 4s total
|
||||
Showing top 4 nodes out of 5
|
||||
----------------------------------------------------------+-------------
|
||||
flat flat% sum% cum cum% calls calls% + context
|
||||
----------------------------------------------------------+-------------
|
||||
1s 100% | 0000000000003000 [testbinary]
|
||||
1s 25.00% 25.00% 1s 25.00% | 0000000000001000 [testbinary]
|
||||
----------------------------------------------------------+-------------
|
||||
1s 25.00% 50.00% 2s 50.00% | 0000000000003000 [testbinary]
|
||||
1s 50.00% | 0000000000001000 [testbinary]
|
||||
----------------------------------------------------------+-------------
|
||||
1s 100% | 0000000000005000 [testbinary]
|
||||
1s 25.00% 75.00% 1s 25.00% | 0000000000004000 [testbinary]
|
||||
----------------------------------------------------------+-------------
|
||||
1s 25.00% 100% 2s 50.00% | 0000000000005000 [testbinary]
|
||||
1s 50.00% | 0000000000004000 [testbinary]
|
||||
----------------------------------------------------------+-------------
|
88
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.callgrind
generated
vendored
Normal file
|
@@ -0,0 +1,88 @@
|
|||
positions: instr line
|
||||
events: inuse_space(MB)
|
||||
|
||||
ob=
|
||||
fl=(1) testdata/file2000.src
|
||||
fn=(1) line2001
|
||||
0x2000 2 62
|
||||
cfl=(2) testdata/file1000.src
|
||||
cfn=(2) line1000
|
||||
calls=0 0x1000 1
|
||||
* * 0
|
||||
|
||||
ob=
|
||||
fl=(3) testdata/file3000.src
|
||||
fn=(3) line3002
|
||||
+4096 3 31
|
||||
cfl=(1)
|
||||
cfn=(4) line2000
|
||||
calls=0 * 3
|
||||
* * 0
|
||||
|
||||
ob=
|
||||
fl=(2)
|
||||
fn=(2)
|
||||
-8192 1 4
|
||||
|
||||
ob=
|
||||
fl=(1)
|
||||
fn=(4)
|
||||
+4096 3 0
|
||||
cfl=(1)
|
||||
cfn=(1)
|
||||
calls=0 +4096 2
|
||||
* * 63
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(5) line3000
|
||||
+4096 4 0
|
||||
cfl=(3)
|
||||
cfn=(6) line3001
|
||||
calls=0 +4096 2
|
||||
* * 32
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(6)
|
||||
* 2 0
|
||||
cfl=(3)
|
||||
cfn=(3)
|
||||
calls=0 * 3
|
||||
* * 32
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(5)
|
||||
+1 4 0
|
||||
cfl=(3)
|
||||
cfn=(6)
|
||||
calls=0 +1 2
|
||||
* * 3
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(6)
|
||||
* 2 0
|
||||
cfl=(2)
|
||||
cfn=(2)
|
||||
calls=0 -8193 1
|
||||
* * 3
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(5)
|
||||
+1 4 0
|
||||
cfl=(3)
|
||||
cfn=(3)
|
||||
calls=0 +1 3
|
||||
* * 62
|
||||
|
||||
ob=
|
||||
fl=(3)
|
||||
fn=(3)
|
||||
* 3 0
|
||||
cfl=(1)
|
||||
cfn=(4)
|
||||
calls=0 -4098 3
|
||||
* * 62
|
2
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.comments
generated
vendored
Normal file
|
@@ -0,0 +1,2 @@
|
|||
comment
|
||||
#hidden comment
|
19
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.cum.lines.tree.focus
generated
vendored
Normal file
|
@@ -0,0 +1,19 @@
|
|||
Showing nodes accounting for 62.50MB, 63.37% of 98.63MB total
|
||||
Dropped 2 nodes (cum <= 4.93MB)
|
||||
----------------------------------------------------------+-------------
|
||||
flat flat% sum% cum cum% calls calls% + context
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line3002 testdata/file3000.src:3
|
||||
0 0% 0% 63.48MB 64.36% | line2000 testdata/file2000.src:3
|
||||
63.48MB 100% | line2001 testdata/file2000.src:2 (inline)
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line2000 testdata/file2000.src:3 (inline)
|
||||
62.50MB 63.37% 63.37% 63.48MB 64.36% | line2001 testdata/file2000.src:2
|
||||
----------------------------------------------------------+-------------
|
||||
0 0% 63.37% 63.48MB 64.36% | line3000 testdata/file3000.src:4
|
||||
63.48MB 100% | line3002 testdata/file3000.src:3 (inline)
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line3000 testdata/file3000.src:4 (inline)
|
||||
0 0% 63.37% 63.48MB 64.36% | line3002 testdata/file3000.src:3
|
||||
63.48MB 100% | line2000 testdata/file2000.src:3
|
||||
----------------------------------------------------------+-------------
|
19
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.cum.relative_percentages.tree.focus
generated
vendored
Normal file
|
@@ -0,0 +1,19 @@
|
|||
Showing nodes accounting for 62.50MB, 98.46% of 63.48MB total
|
||||
Dropped 2 nodes (cum <= 3.17MB)
|
||||
----------------------------------------------------------+-------------
|
||||
flat flat% sum% cum cum% calls calls% + context
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line3002 testdata/file3000.src
|
||||
0 0% 0% 63.48MB 100% | line2000 testdata/file2000.src
|
||||
63.48MB 100% | line2001 testdata/file2000.src (inline)
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line2000 testdata/file2000.src (inline)
|
||||
62.50MB 98.46% 98.46% 63.48MB 100% | line2001 testdata/file2000.src
|
||||
----------------------------------------------------------+-------------
|
||||
0 0% 98.46% 63.48MB 100% | line3000 testdata/file3000.src
|
||||
63.48MB 100% | line3002 testdata/file3000.src (inline)
|
||||
----------------------------------------------------------+-------------
|
||||
63.48MB 100% | line3000 testdata/file3000.src (inline)
|
||||
0 0% 98.46% 63.48MB 100% | line3002 testdata/file3000.src
|
||||
63.48MB 100% | line2000 testdata/file2000.src
|
||||
----------------------------------------------------------+-------------
|
2
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.files.seconds.text
generated
vendored
Normal file
|
@@ -0,0 +1,2 @@
|
|||
Showing nodes accounting for 0, 0% of 0 total
|
||||
flat flat% sum% cum cum%
|
5
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.files.text
generated
vendored
Normal file
|
@@ -0,0 +1,5 @@
|
|||
Showing nodes accounting for 93.75MB, 95.05% of 98.63MB total
|
||||
Dropped 1 node (cum <= 4.93MB)
|
||||
flat flat% sum% cum cum%
|
||||
62.50MB 63.37% 63.37% 63.48MB 64.36% testdata/file2000.src
|
||||
31.25MB 31.68% 95.05% 98.63MB 100% testdata/file3000.src
|
8
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.inuse_objects.text
generated
vendored
Normal file
|
@@ -0,0 +1,8 @@
|
|||
Showing nodes accounting for 150, 100% of 150 total
|
||||
flat flat% sum% cum cum%
|
||||
80 53.33% 53.33% 130 86.67% line3002 testdata/file3000.src (inline)
|
||||
40 26.67% 80.00% 50 33.33% line2001 testdata/file2000.src (inline)
|
||||
30 20.00% 100% 30 20.00% line1000 testdata/file1000.src
|
||||
0 0% 100% 50 33.33% line2000 testdata/file2000.src
|
||||
0 0% 100% 150 100% line3000 testdata/file3000.src
|
||||
0 0% 100% 110 73.33% line3001 testdata/file3000.src (inline)
|
13
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus
generated
vendored
Normal file
|
@@ -0,0 +1,13 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lShowing nodes accounting for 62.50MB, 63.37% of 98.63MB total\l"] }
|
||||
N1 [label="line2001\nfile2000.src\n62.50MB (63.37%)" fontsize=24 shape=box tooltip="line2001 testdata/file2000.src (62.50MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
NN1_0 [label = "1.56MB" fontsize=8 shape=box3d tooltip="62.50MB"]
|
||||
N1 -> NN1_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"]
|
||||
N2 [label="line3000\nfile3000.src\n0 of 62.50MB (63.37%)" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src (62.50MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N3 [label="line2000\nfile2000.src\n0 of 62.50MB (63.37%)" fontsize=8 shape=box tooltip="line2000 testdata/file2000.src (62.50MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N4 [label="line3002\nfile3000.src\n0 of 62.50MB (63.37%)" fontsize=8 shape=box tooltip="line3002 testdata/file3000.src (62.50MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N3 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (62.50MB)" labeltooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (62.50MB)"]
|
||||
N2 -> N4 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 testdata/file3000.src -> line3002 testdata/file3000.src (62.50MB)" labeltooltip="line3000 testdata/file3000.src -> line3002 testdata/file3000.src (62.50MB)"]
|
||||
N4 -> N3 [label=" 62.50MB" weight=64 penwidth=4 color="#b21600" tooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (62.50MB)" labeltooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (62.50MB)"]
|
||||
}
|
16
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus.ignore
generated
vendored
Normal file
|
@@ -0,0 +1,16 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lShowing nodes accounting for 36.13MB, 36.63% of 98.63MB total\lDropped 2 nodes (cum <= 4.93MB)\l"] }
|
||||
N1 [label="line3002\nfile3000.src\n31.25MB (31.68%)\nof 32.23MB (32.67%)" fontsize=24 shape=box tooltip="line3002 testdata/file3000.src (32.23MB)" color="#b23200" fillcolor="#eddcd5"]
|
||||
NN1_0 [label = "400kB" fontsize=8 shape=box3d tooltip="31.25MB"]
|
||||
N1 -> NN1_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"]
|
||||
N2 [label="line3000\nfile3000.src\n0 of 36.13MB (36.63%)" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src (36.13MB)" color="#b22e00" fillcolor="#eddbd5"]
|
||||
N3 [label="line3001\nfile3000.src\n0 of 36.13MB (36.63%)" fontsize=8 shape=box tooltip="line3001 testdata/file3000.src (36.13MB)" color="#b22e00" fillcolor="#eddbd5"]
|
||||
N4 [label="line1000\nfile1000.src\n4.88MB (4.95%)" fontsize=15 shape=box tooltip="line1000 testdata/file1000.src (4.88MB)" color="#b2a086" fillcolor="#edeae7"]
|
||||
NN4_0 [label = "200kB" fontsize=8 shape=box3d tooltip="3.91MB"]
|
||||
N4 -> NN4_0 [label=" 3.91MB" weight=100 tooltip="3.91MB" labeltooltip="3.91MB"]
|
||||
N2 -> N3 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)" labeltooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)"]
|
||||
N3 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (32.23MB)" labeltooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (32.23MB)"]
|
||||
N3 -> N4 [label=" 3.91MB" weight=4 color="#b2a58f" tooltip="line3001 testdata/file3000.src -> line1000 testdata/file1000.src (3.91MB)" labeltooltip="line3001 testdata/file3000.src -> line1000 testdata/file1000.src (3.91MB)"]
|
||||
N1 -> N4 [label=" 0.98MB" color="#b2b0a9" tooltip="line3002 testdata/file3000.src ... line1000 testdata/file1000.src (0.98MB)" labeltooltip="line3002 testdata/file3000.src ... line1000 testdata/file1000.src (0.98MB)" style="dotted" minlen=2]
|
||||
}
|
21
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.flat.lines.dot.focus
generated
vendored
Normal file
|
@@ -0,0 +1,21 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lShowing nodes accounting for 67.38MB, 68.32% of 98.63MB total\l"] }
|
||||
N1 [label="line3000\nfile3000.src:4\n0 of 67.38MB (68.32%)" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src:4 (67.38MB)" color="#b21300" fillcolor="#edd7d5"]
|
||||
N2 [label="line2001\nfile2000.src:2\n62.50MB (63.37%)\nof 63.48MB (64.36%)" fontsize=24 shape=box tooltip="line2001 testdata/file2000.src:2 (63.48MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
NN2_0 [label = "1.56MB" fontsize=8 shape=box3d tooltip="62.50MB"]
|
||||
N2 -> NN2_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"]
|
||||
N3 [label="line1000\nfile1000.src:1\n4.88MB (4.95%)" fontsize=13 shape=box tooltip="line1000 testdata/file1000.src:1 (4.88MB)" color="#b2a086" fillcolor="#edeae7"]
|
||||
NN3_0 [label = "200kB" fontsize=8 shape=box3d tooltip="3.91MB"]
|
||||
N3 -> NN3_0 [label=" 3.91MB" weight=100 tooltip="3.91MB" labeltooltip="3.91MB"]
|
||||
N4 [label="line3002\nfile3000.src:3\n0 of 63.48MB (64.36%)" fontsize=8 shape=box tooltip="line3002 testdata/file3000.src:3 (63.48MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N5 [label="line3001\nfile3000.src:2\n0 of 4.88MB (4.95%)" fontsize=8 shape=box tooltip="line3001 testdata/file3000.src:2 (4.88MB)" color="#b2a086" fillcolor="#edeae7"]
|
||||
N6 [label="line2000\nfile2000.src:3\n0 of 63.48MB (64.36%)" fontsize=8 shape=box tooltip="line2000 testdata/file2000.src:3 (63.48MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N6 -> N2 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 testdata/file2000.src:3 -> line2001 testdata/file2000.src:2 (63.48MB)" labeltooltip="line2000 testdata/file2000.src:3 -> line2001 testdata/file2000.src:2 (63.48MB)"]
|
||||
N4 -> N6 [label=" 63.48MB" weight=65 penwidth=4 color="#b21600" tooltip="line3002 testdata/file3000.src:3 -> line2000 testdata/file2000.src:3 (63.48MB)" labeltooltip="line3002 testdata/file3000.src:3 -> line2000 testdata/file2000.src:3 (63.48MB)"]
|
||||
N1 -> N4 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 testdata/file3000.src:4 -> line3002 testdata/file3000.src:3 (62.50MB)" labeltooltip="line3000 testdata/file3000.src:4 -> line3002 testdata/file3000.src:3 (62.50MB)"]
|
||||
N1 -> N5 [label=" 4.88MB\n (inline)" weight=5 color="#b2a086" tooltip="line3000 testdata/file3000.src:4 -> line3001 testdata/file3000.src:2 (4.88MB)" labeltooltip="line3000 testdata/file3000.src:4 -> line3001 testdata/file3000.src:2 (4.88MB)"]
|
||||
N5 -> N3 [label=" 3.91MB" weight=4 color="#b2a58f" tooltip="line3001 testdata/file3000.src:2 -> line1000 testdata/file1000.src:1 (3.91MB)" labeltooltip="line3001 testdata/file3000.src:2 -> line1000 testdata/file1000.src:1 (3.91MB)"]
|
||||
N2 -> N3 [label=" 0.98MB" color="#b2b0a9" tooltip="line2001 testdata/file2000.src:2 -> line1000 testdata/file1000.src:1 (0.98MB)" labeltooltip="line2001 testdata/file2000.src:2 -> line1000 testdata/file1000.src:1 (0.98MB)" minlen=2]
|
||||
N5 -> N4 [label=" 0.98MB\n (inline)" color="#b2b0a9" tooltip="line3001 testdata/file3000.src:2 -> line3002 testdata/file3000.src:3 (0.98MB)" labeltooltip="line3001 testdata/file3000.src:2 -> line3002 testdata/file3000.src:3 (0.98MB)"]
|
||||
}
|
6
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.tags
generated
vendored
Normal file
|
@@ -0,0 +1,6 @@
|
|||
bytes: Total 150
|
||||
80 (53.33%): 400kB
|
||||
40 (26.67%): 1.56MB
|
||||
20 (13.33%): 200kB
|
||||
10 ( 6.67%): 100kB
|
||||
|
6
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap.tags.unit
generated
vendored
Normal file
|
@@ -0,0 +1,6 @@
|
|||
bytes: Total 150
|
||||
80 (53.33%): 409600B
|
||||
40 (26.67%): 1638400B
|
||||
20 (13.33%): 204800B
|
||||
10 ( 6.67%): 102400B
|
||||
|
8
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap_alloc.flat.alloc_objects.text
generated
vendored
Normal file
|
@@ -0,0 +1,8 @@
|
|||
Showing nodes accounting for 150, 100% of 150 total
|
||||
flat flat% sum% cum cum%
|
||||
80 53.33% 53.33% 130 86.67% line3002 testdata/file3000.src (inline)
|
||||
40 26.67% 80.00% 50 33.33% line2001 testdata/file2000.src (inline)
|
||||
30 20.00% 100% 30 20.00% line1000 testdata/file1000.src
|
||||
0 0% 100% 50 33.33% line2000 testdata/file2000.src
|
||||
0 0% 100% 150 100% line3000 testdata/file3000.src
|
||||
0 0% 100% 110 73.33% line3001 testdata/file3000.src (inline)
|
18
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.focus
generated
vendored
Normal file
|
@@ -0,0 +1,18 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: alloc_space\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l"] }
|
||||
N1 [label="line3002\nfile3000.src\n31.25MB (31.68%)\nof 94.73MB (96.04%)" fontsize=20 shape=box tooltip="line3002 testdata/file3000.src (94.73MB)" color="#b20200" fillcolor="#edd5d5"]
|
||||
NN1_0 [label = "400kB" fontsize=8 shape=box3d tooltip="31.25MB"]
|
||||
N1 -> NN1_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"]
|
||||
N2 [label="line3000\nfile3000.src\n0 of 98.63MB (100%)" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src (98.63MB)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N3 [label="line2001\nfile2000.src\n62.50MB (63.37%)\nof 63.48MB (64.36%)" fontsize=24 shape=box tooltip="line2001 testdata/file2000.src (63.48MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
NN3_0 [label = "1.56MB" fontsize=8 shape=box3d tooltip="62.50MB"]
|
||||
N3 -> NN3_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"]
|
||||
N4 [label="line2000\nfile2000.src\n0 of 63.48MB (64.36%)" fontsize=8 shape=box tooltip="line2000 testdata/file2000.src (63.48MB)" color="#b21600" fillcolor="#edd8d5"]
|
||||
N5 [label="line3001\nfile3000.src\n0 of 36.13MB (36.63%)" fontsize=8 shape=box tooltip="line3001 testdata/file3000.src (36.13MB)" color="#b22e00" fillcolor="#eddbd5"]
|
||||
N4 -> N3 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (63.48MB)" labeltooltip="line2000 testdata/file2000.src -> line2001 testdata/file2000.src (63.48MB)"]
|
||||
N1 -> N4 [label=" 63.48MB" weight=65 penwidth=4 color="#b21600" tooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (63.48MB)" labeltooltip="line3002 testdata/file3000.src -> line2000 testdata/file2000.src (63.48MB)" minlen=2]
|
||||
N2 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 testdata/file3000.src -> line3002 testdata/file3000.src (62.50MB)" labeltooltip="line3000 testdata/file3000.src -> line3002 testdata/file3000.src (62.50MB)"]
|
||||
N2 -> N5 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)" labeltooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)"]
|
||||
N5 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (32.23MB)" labeltooltip="line3001 testdata/file3000.src -> line3002 testdata/file3000.src (32.23MB)"]
|
||||
}
|
11
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.hide
generated
vendored
Normal file
|
@@ -0,0 +1,11 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: alloc_space\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l"] }
|
||||
N1 [label="line3000\nfile3000.src\n62.50MB (63.37%)\nof 98.63MB (100%)" fontsize=24 shape=box tooltip="line3000 testdata/file3000.src (98.63MB)" color="#b20000" fillcolor="#edd5d5"]
|
||||
NN1_0 [label = "1.56MB" fontsize=8 shape=box3d tooltip="62.50MB"]
|
||||
N1 -> NN1_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"]
|
||||
N2 [label="line3001\nfile3000.src\n31.25MB (31.68%)\nof 36.13MB (36.63%)" fontsize=20 shape=box tooltip="line3001 testdata/file3000.src (36.13MB)" color="#b22e00" fillcolor="#eddbd5"]
|
||||
NN2_0 [label = "400kB" fontsize=8 shape=box3d tooltip="31.25MB"]
|
||||
N2 -> NN2_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"]
|
||||
N1 -> N2 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)" labeltooltip="line3000 testdata/file3000.src -> line3001 testdata/file3000.src (36.13MB)" minlen=2]
|
||||
}
|
8
src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.unknown.flat.functions.text
generated
vendored
Normal file
|
@@ -0,0 +1,8 @@
|
|||
Showing nodes accounting for 1.12s, 100% of 1.12s total
|
||||
flat flat% sum% cum cum%
|
||||
1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src
|
||||
0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src (inline)
|
||||
0.01s 0.89% 100% 1.02s 91.07% line3002 testdata/file3000.src (inline)
|
||||
0 0% 100% 1.01s 90.18% line2000 testdata/file2000.src
|
||||
0 0% 100% 1.12s 100% line3000 testdata/file3000.src
|
||||
0 0% 100% 1.11s 99.11% line3001 testdata/file3000.src (inline)
|
256
src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
generated
vendored
Normal file
|
@@ -0,0 +1,256 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package elfexec provides utility routines to examine ELF binaries.
|
||||
package elfexec
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"debug/elf"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
maxNoteSize = 1 << 20 // in bytes
|
||||
noteTypeGNUBuildID = 3
|
||||
)
|
||||
|
||||
// elfNote is the payload of a Note Section in an ELF file.
|
||||
type elfNote struct {
|
||||
Name string // Contents of the "name" field, omitting the trailing zero byte.
|
||||
Desc []byte // Contents of the "desc" field.
|
||||
Type uint32 // Contents of the "type" field.
|
||||
}
|
||||
|
||||
// parseNotes returns the notes from a SHT_NOTE section or PT_NOTE segment.
|
||||
func parseNotes(reader io.Reader, alignment int, order binary.ByteOrder) ([]elfNote, error) {
|
||||
r := bufio.NewReader(reader)
|
||||
|
||||
// padding returns the number of bytes required to pad the given size to an
|
||||
// alignment boundary.
|
||||
padding := func(size int) int {
|
||||
return ((size + (alignment - 1)) &^ (alignment - 1)) - size
|
||||
}
|
||||
|
||||
var notes []elfNote
|
||||
for {
|
||||
noteHeader := make([]byte, 12) // 3 4-byte words
|
||||
if _, err := io.ReadFull(r, noteHeader); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namesz := order.Uint32(noteHeader[0:4])
|
||||
descsz := order.Uint32(noteHeader[4:8])
|
||||
typ := order.Uint32(noteHeader[8:12])
|
||||
|
||||
if uint64(namesz) > uint64(maxNoteSize) {
|
||||
return nil, fmt.Errorf("note name too long (%d bytes)", namesz)
|
||||
}
|
||||
var name string
|
||||
if namesz > 0 {
|
||||
// Documentation differs as to whether namesz is meant to include the
|
||||
// trailing zero, but everyone agrees that name is null-terminated.
|
||||
// So we'll just determine the actual length after the fact.
|
||||
var err error
|
||||
name, err = r.ReadString('\x00')
|
||||
if err == io.EOF {
|
||||
return nil, fmt.Errorf("missing note name (want %d bytes)", namesz)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
namesz = uint32(len(name))
|
||||
name = name[:len(name)-1]
|
||||
}
|
||||
|
||||
// Drop padding bytes until the desc field.
|
||||
for n := padding(len(noteHeader) + int(namesz)); n > 0; n-- {
|
||||
if _, err := r.ReadByte(); err == io.EOF {
|
||||
return nil, fmt.Errorf(
|
||||
"missing %d bytes of padding after note name", n)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if uint64(descsz) > uint64(maxNoteSize) {
|
||||
return nil, fmt.Errorf("note desc too long (%d bytes)", descsz)
|
||||
}
|
||||
desc := make([]byte, int(descsz))
|
||||
if _, err := io.ReadFull(r, desc); err == io.EOF {
|
||||
return nil, fmt.Errorf("missing desc (want %d bytes)", len(desc))
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notes = append(notes, elfNote{Name: name, Desc: desc, Type: typ})
|
||||
|
||||
// Drop padding bytes until the next note or the end of the section,
|
||||
// whichever comes first.
|
||||
for n := padding(len(desc)); n > 0; n-- {
|
||||
if _, err := r.ReadByte(); err == io.EOF {
|
||||
// We hit the end of the section before an alignment boundary.
|
||||
// This can happen if this section is at the end of the file or the next
|
||||
// section has a smaller alignment requirement.
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return notes, nil
|
||||
}
|
||||
|
||||
// GetBuildID returns the GNU build-ID for an ELF binary.
|
||||
//
|
||||
// If no build-ID was found but the binary was read without error, it returns
|
||||
// (nil, nil).
|
||||
func GetBuildID(binary io.ReaderAt) ([]byte, error) {
|
||||
f, err := elf.NewFile(binary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
findBuildID := func(notes []elfNote) ([]byte, error) {
|
||||
var buildID []byte
|
||||
for _, note := range notes {
|
||||
if note.Name == "GNU" && note.Type == noteTypeGNUBuildID {
|
||||
if buildID == nil {
|
||||
buildID = note.Desc
|
||||
} else {
|
||||
return nil, fmt.Errorf("multiple build ids found, don't know which to use!")
|
||||
}
|
||||
}
|
||||
}
|
||||
return buildID, nil
|
||||
}
|
||||
|
||||
for _, p := range f.Progs {
|
||||
if p.Type != elf.PT_NOTE {
|
||||
continue
|
||||
}
|
||||
notes, err := parseNotes(p.Open(), int(p.Align), f.ByteOrder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b, err := findBuildID(notes); b != nil || err != nil {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
for _, s := range f.Sections {
|
||||
if s.Type != elf.SHT_NOTE {
|
||||
continue
|
||||
}
|
||||
notes, err := parseNotes(s.Open(), int(s.Addralign), f.ByteOrder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b, err := findBuildID(notes); b != nil || err != nil {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetBase determines the base address to subtract from virtual
|
||||
// address to get symbol table address. For an executable, the base
|
||||
// is 0. Otherwise, it's a shared library, and the base is the
|
||||
// address where the mapping starts. The kernel is special, and may
|
||||
// use the address of the _stext symbol as the mmap start. _stext
|
||||
// offset can be obtained with `nm vmlinux | grep _stext`
|
||||
func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, error) {
|
||||
const (
|
||||
pageSize = 4096
|
||||
// PAGE_OFFSET for PowerPC64, see arch/powerpc/Kconfig in the kernel sources.
|
||||
pageOffsetPpc64 = 0xc000000000000000
|
||||
)
|
||||
|
||||
if start == 0 && offset == 0 &&
|
||||
(limit == ^uint64(0) || limit == 0) {
|
||||
// Some tools may introduce a fake mapping that spans the entire
|
||||
// address space. Assume that the address has already been
|
||||
// adjusted, so no additional base adjustment is necessary.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
switch fh.Type {
|
||||
case elf.ET_EXEC:
|
||||
if loadSegment == nil {
|
||||
// Fixed-address executable, no adjustment.
|
||||
return 0, nil
|
||||
}
|
||||
if start == 0 && limit != 0 {
|
||||
// ChromeOS remaps its kernel to 0. Nothing else should come
|
||||
// down this path. Empirical values:
|
||||
// VADDR=0xffffffff80200000
|
||||
// stextOffset=0xffffffff80200198
|
||||
if stextOffset != nil {
|
||||
return -*stextOffset, nil
|
||||
}
|
||||
return -loadSegment.Vaddr, nil
|
||||
}
|
||||
if loadSegment.Vaddr-loadSegment.Off == start-offset {
|
||||
return offset, nil
|
||||
}
|
||||
if loadSegment.Vaddr == start-offset {
|
||||
return offset, nil
|
||||
}
|
||||
if start >= loadSegment.Vaddr && limit > start && (offset == 0 || offset == pageOffsetPpc64) {
|
||||
// Some kernels look like:
|
||||
// VADDR=0xffffffff80200000
|
||||
// stextOffset=0xffffffff80200198
|
||||
// Start=0xffffffff83200000
|
||||
// Limit=0xffffffff84200000
|
||||
// Offset=0 (0xc000000000000000 for PowerPC64)
|
||||
// So the base should be:
|
||||
if stextOffset != nil && (start%pageSize) == (*stextOffset%pageSize) {
|
||||
// perf uses the address of _stext as start. Some tools may
|
||||
// adjust for this before calling GetBase, in which case the page
|
||||
// alignment should be different from that of stextOffset.
|
||||
return start - *stextOffset, nil
|
||||
}
|
||||
|
||||
return start - loadSegment.Vaddr, nil
|
||||
} else if start%pageSize != 0 && stextOffset != nil && *stextOffset%pageSize == start%pageSize {
|
||||
// ChromeOS remaps its kernel to 0 + start%pageSize. Nothing
|
||||
// else should come down this path. Empirical values:
|
||||
// start=0x198 limit=0x2f9fffff offset=0
|
||||
// VADDR=0xffffffff81000000
|
||||
// stextOffset=0xffffffff81000198
|
||||
return -(*stextOffset - start), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("Don't know how to handle EXEC segment: %v start=0x%x limit=0x%x offset=0x%x", *loadSegment, start, limit, offset)
|
||||
case elf.ET_REL:
|
||||
if offset != 0 {
|
||||
return 0, fmt.Errorf("Don't know how to handle mapping.Offset")
|
||||
}
|
||||
return start, nil
|
||||
case elf.ET_DYN:
|
||||
if offset != 0 {
|
||||
if loadSegment == nil || loadSegment.Vaddr == 0 {
|
||||
return start - offset, nil
|
||||
}
|
||||
return 0, fmt.Errorf("Don't know how to handle mapping. Offset=%x, vaddr=%x",
|
||||
offset, loadSegment.Vaddr)
|
||||
}
|
||||
if loadSegment == nil {
|
||||
return start, nil
|
||||
}
|
||||
return start - loadSegment.Vaddr, nil
|
||||
}
|
||||
return 0, fmt.Errorf("Don't know how to handle FileHeader.Type %v", fh.Type)
|
||||
}
|
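As a rough illustration of what the note parsing in elfexec.go recovers, the GNU build ID can also be read with nothing but the standard library. The sketch below is not part of this change and does not use the vendored package (its import path is internal to pprof); readGNUBuildID is a hypothetical helper that mirrors, in simplified form, what parseNotes and GetBuildID do above, assuming 4-byte note alignment.

package main

import (
	"debug/elf"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// readGNUBuildID scans PT_NOTE segments for a note of type 3 (NT_GNU_BUILD_ID,
// the same constant as noteTypeGNUBuildID above) owned by "GNU" and returns
// its descriptor bytes. Simplified illustration only: 4-byte alignment assumed.
func readGNUBuildID(path string) ([]byte, error) {
	f, err := elf.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	for _, p := range f.Progs {
		if p.Type != elf.PT_NOTE {
			continue
		}
		data, err := io.ReadAll(p.Open())
		if err != nil {
			return nil, err
		}
		for len(data) >= 12 {
			namesz := int(f.ByteOrder.Uint32(data[0:4]))
			descsz := int(f.ByteOrder.Uint32(data[4:8]))
			typ := f.ByteOrder.Uint32(data[8:12])
			data = data[12:]
			nameEnd := (namesz + 3) &^ 3             // name is padded to 4 bytes
			descEnd := nameEnd + ((descsz + 3) &^ 3) // so is desc
			if nameEnd > len(data) || descEnd > len(data) {
				break
			}
			if typ == 3 && string(data[:namesz]) == "GNU\x00" {
				return data[nameEnd : nameEnd+descsz], nil
			}
			data = data[descEnd:]
		}
	}
	return nil, fmt.Errorf("no GNU build ID note found in %s", path)
}

func main() {
	if len(os.Args) != 2 {
		log.Fatal("usage: buildid <elf-binary>")
	}
	id, err := readGNUBuildID(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(id))
}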
92
src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,92 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package elfexec
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetBase(t *testing.T) {
|
||||
|
||||
fhExec := &elf.FileHeader{
|
||||
Type: elf.ET_EXEC,
|
||||
}
|
||||
fhRel := &elf.FileHeader{
|
||||
Type: elf.ET_REL,
|
||||
}
|
||||
fhDyn := &elf.FileHeader{
|
||||
Type: elf.ET_DYN,
|
||||
}
|
||||
lsOffset := &elf.ProgHeader{
|
||||
Vaddr: 0x400000,
|
||||
Off: 0x200000,
|
||||
}
|
||||
kernelHeader := &elf.ProgHeader{
|
||||
Vaddr: 0xffffffff81000000,
|
||||
}
|
||||
ppc64KernelHeader := &elf.ProgHeader{
|
||||
Vaddr: 0xc000000000000000,
|
||||
}
|
||||
|
||||
testcases := []struct {
|
||||
label string
|
||||
fh *elf.FileHeader
|
||||
loadSegment *elf.ProgHeader
|
||||
stextOffset *uint64
|
||||
start, limit, offset uint64
|
||||
want uint64
|
||||
wanterr bool
|
||||
}{
|
||||
{"exec", fhExec, nil, nil, 0x400000, 0, 0, 0, false},
|
||||
{"exec offset", fhExec, lsOffset, nil, 0x400000, 0x800000, 0, 0, false},
|
||||
{"exec offset 2", fhExec, lsOffset, nil, 0x200000, 0x600000, 0, 0, false},
|
||||
{"exec nomap", fhExec, nil, nil, 0, 0, 0, 0, false},
|
||||
{"exec kernel", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0xffffffff82000198, 0xffffffff83000198, 0, 0x1000000, false},
|
||||
{"exec PPC64 kernel", fhExec, ppc64KernelHeader, uint64p(0xc000000000000000), 0xc000000000000000, 0xd00000001a730000, 0xc000000000000000, 0x0, false},
|
||||
{"exec chromeos kernel", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0, 0x10197, 0, 0x7efffe68, false},
|
||||
{"exec chromeos kernel 2", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0, 0x10198, 0, 0x7efffe68, false},
|
||||
{"exec chromeos kernel 3", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0x198, 0x100000, 0, 0x7f000000, false},
|
||||
{"exec chromeos kernel 4", fhExec, kernelHeader, uint64p(0xffffffff81200198), 0x198, 0x100000, 0, 0x7ee00000, false},
|
||||
{"exec chromeos kernel unremapped", fhExec, kernelHeader, uint64p(0xffffffff810001c8), 0xffffffff834001c8, 0xffffffffc0000000, 0xffffffff834001c8, 0x2400000, false},
|
||||
{"dyn", fhDyn, nil, nil, 0x200000, 0x300000, 0, 0x200000, false},
|
||||
{"dyn offset", fhDyn, lsOffset, nil, 0x0, 0x300000, 0, 0xFFFFFFFFFFC00000, false},
|
||||
{"dyn nomap", fhDyn, nil, nil, 0x0, 0x0, 0, 0, false},
|
||||
{"rel", fhRel, nil, nil, 0x2000000, 0x3000000, 0, 0x2000000, false},
|
||||
{"rel nomap", fhRel, nil, nil, 0x0, ^uint64(0), 0, 0, false},
|
||||
{"rel offset", fhRel, nil, nil, 0x100000, 0x200000, 0x1, 0, true},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
base, err := GetBase(tc.fh, tc.loadSegment, tc.stextOffset, tc.start, tc.limit, tc.offset)
|
||||
if err != nil {
|
||||
if !tc.wanterr {
|
||||
t.Errorf("%s: want no error, got %v", tc.label, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if tc.wanterr {
|
||||
t.Errorf("%s: want error, got nil", tc.label)
|
||||
continue
|
||||
}
|
||||
if base != tc.want {
|
||||
t.Errorf("%s: want %x, got %x", tc.label, tc.want, base)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func uint64p(n uint64) *uint64 {
|
||||
return &n
|
||||
}
|
483
src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
generated
vendored
Normal file
|
@@ -0,0 +1,483 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/measurement"
|
||||
)
|
||||
|
||||
// DotAttributes contains details about the graph itself, giving
|
||||
// insight into how its elements should be rendered.
|
||||
type DotAttributes struct {
|
||||
Nodes map[*Node]*DotNodeAttributes // A map allowing each Node to have its own visualization option
|
||||
}
|
||||
|
||||
// DotNodeAttributes contains Node specific visualization options.
|
||||
type DotNodeAttributes struct {
|
||||
Shape string // The optional shape of the node when rendered visually
|
||||
Bold bool // If the node should be bold or not
|
||||
Peripheries int // An optional number of borders to place around a node
|
||||
URL string // An optional url link to add to a node
|
||||
Formatter func(*NodeInfo) string // An optional formatter for the node's label
|
||||
}
|
||||
|
||||
// DotConfig contains attributes about how a graph should be
|
||||
// constructed and how it should look.
|
||||
type DotConfig struct {
|
||||
Title string // The title of the DOT graph
|
||||
Labels []string // The labels for the DOT's legend
|
||||
|
||||
FormatValue func(int64) string // A formatting function for values
|
||||
FormatTag func(int64, string) string // A formatting function for numeric tags
|
||||
Total int64 // The total weight of the graph, used to compute percentages
|
||||
}
|
||||
|
||||
// ComposeDot creates and writes a graph in the DOT format to the writer, using
|
||||
// the configurations given.
|
||||
func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) {
|
||||
builder := &builder{w, a, c}
|
||||
|
||||
// Begin constructing DOT by adding a title and legend.
|
||||
builder.start()
|
||||
defer builder.finish()
|
||||
builder.addLegend()
|
||||
|
||||
if len(g.Nodes) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Preprocess graph to get id map and find max flat.
|
||||
nodeIDMap := make(map[*Node]int)
|
||||
hasNodelets := make(map[*Node]bool)
|
||||
|
||||
maxFlat := float64(abs64(g.Nodes[0].FlatValue()))
|
||||
for i, n := range g.Nodes {
|
||||
nodeIDMap[n] = i + 1
|
||||
if float64(abs64(n.FlatValue())) > maxFlat {
|
||||
maxFlat = float64(abs64(n.FlatValue()))
|
||||
}
|
||||
}
|
||||
|
||||
edges := EdgeMap{}
|
||||
|
||||
// Add nodes and nodelets to DOT builder.
|
||||
for _, n := range g.Nodes {
|
||||
builder.addNode(n, nodeIDMap[n], maxFlat)
|
||||
hasNodelets[n] = builder.addNodelets(n, nodeIDMap[n])
|
||||
|
||||
// Collect all edges. Use a fake node to support multiple incoming edges.
|
||||
for _, e := range n.Out {
|
||||
edges[&Node{}] = e
|
||||
}
|
||||
}
|
||||
|
||||
// Add edges to DOT builder. Sort edges by frequency as a hint to the graph layout engine.
|
||||
for _, e := range edges.Sort() {
|
||||
builder.addEdge(e, nodeIDMap[e.Src], nodeIDMap[e.Dest], hasNodelets[e.Src])
|
||||
}
|
||||
}
|
||||
|
||||
// builder wraps an io.Writer and understands how to compose DOT formatted elements.
|
||||
type builder struct {
|
||||
io.Writer
|
||||
attributes *DotAttributes
|
||||
config *DotConfig
|
||||
}
|
||||
|
||||
// start generates a title and initial node in DOT format.
|
||||
func (b *builder) start() {
|
||||
graphname := "unnamed"
|
||||
if b.config.Title != "" {
|
||||
graphname = b.config.Title
|
||||
}
|
||||
fmt.Fprintln(b, `digraph "`+graphname+`" {`)
|
||||
fmt.Fprintln(b, `node [style=filled fillcolor="#f8f8f8"]`)
|
||||
}
|
||||
|
||||
// finish closes the opening curly bracket in the constructed DOT buffer.
|
||||
func (b *builder) finish() {
|
||||
fmt.Fprintln(b, "}")
|
||||
}
|
||||
|
||||
// addLegend generates a legend in DOT format.
|
||||
func (b *builder) addLegend() {
|
||||
labels := b.config.Labels
|
||||
var title string
|
||||
if len(labels) > 0 {
|
||||
title = labels[0]
|
||||
}
|
||||
fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16 label="%s\l"] }`+"\n", title, strings.Join(labels, `\l`))
|
||||
}
|
||||
|
||||
// addNode generates a graph node in DOT format.
|
||||
func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) {
|
||||
flat, cum := node.FlatValue(), node.CumValue()
|
||||
attrs := b.attributes.Nodes[node]
|
||||
|
||||
// Populate label for node.
|
||||
var label string
|
||||
if attrs != nil && attrs.Formatter != nil {
|
||||
label = attrs.Formatter(&node.Info)
|
||||
} else {
|
||||
label = multilinePrintableName(&node.Info)
|
||||
}
|
||||
|
||||
flatValue := b.config.FormatValue(flat)
|
||||
if flat != 0 {
|
||||
label = label + fmt.Sprintf(`%s (%s)`,
|
||||
flatValue,
|
||||
strings.TrimSpace(percentage(flat, b.config.Total)))
|
||||
} else {
|
||||
label = label + "0"
|
||||
}
|
||||
cumValue := flatValue
|
||||
if cum != flat {
|
||||
if flat != 0 {
|
||||
label = label + `\n`
|
||||
} else {
|
||||
label = label + " "
|
||||
}
|
||||
cumValue = b.config.FormatValue(cum)
|
||||
label = label + fmt.Sprintf(`of %s (%s)`,
|
||||
cumValue,
|
||||
strings.TrimSpace(percentage(cum, b.config.Total)))
|
||||
}
|
||||
|
||||
// Scale font sizes from 8 to 24 based on percentage of flat frequency.
|
||||
// Use non linear growth to emphasize the size difference.
|
||||
baseFontSize, maxFontGrowth := 8, 16.0
|
||||
fontSize := baseFontSize
|
||||
if maxFlat != 0 && flat != 0 && float64(abs64(flat)) <= maxFlat {
|
||||
fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(abs64(flat))/maxFlat)))
|
||||
}
|
||||
|
||||
// Determine node shape.
|
||||
shape := "box"
|
||||
if attrs != nil && attrs.Shape != "" {
|
||||
shape = attrs.Shape
|
||||
}
|
||||
|
||||
// Create DOT attribute for node.
|
||||
attr := fmt.Sprintf(`label="%s" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`,
|
||||
label, fontSize, shape, node.Info.PrintableName(), cumValue,
|
||||
dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false),
|
||||
dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true))
|
||||
|
||||
// Add on extra attributes if provided.
|
||||
if attrs != nil {
|
||||
// Make bold if specified.
|
||||
if attrs.Bold {
|
||||
attr += ` style="bold,filled"`
|
||||
}
|
||||
|
||||
// Add peripheries if specified.
|
||||
if attrs.Peripheries != 0 {
|
||||
attr += fmt.Sprintf(` peripheries=%d`, attrs.Peripheries)
|
||||
}
|
||||
|
||||
// Add URL if specified. target="_blank" forces the link to open in a new tab.
|
||||
if attrs.URL != "" {
|
||||
attr += fmt.Sprintf(` URL="%s" target="_blank"`, attrs.URL)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(b, "N%d [%s]\n", nodeID, attr)
|
||||
}
|
||||
|
||||
// addNodelets generates the DOT boxes for the node tags if they exist.
|
||||
func (b *builder) addNodelets(node *Node, nodeID int) bool {
|
||||
const maxNodelets = 4 // Number of nodelets for alphanumeric labels
|
||||
const maxNumNodelets = 4 // Number of nodelets for numeric labels
|
||||
var nodelets string
|
||||
|
||||
// Populate two Tag slices, one for LabelTags and one for NumericTags.
|
||||
var ts []*Tag
|
||||
lnts := make(map[string][]*Tag, 0)
|
||||
for _, t := range node.LabelTags {
|
||||
ts = append(ts, t)
|
||||
}
|
||||
for l, tm := range node.NumericTags {
|
||||
for _, t := range tm {
|
||||
lnts[l] = append(lnts[l], t)
|
||||
}
|
||||
}
|
||||
|
||||
// For leaf nodes, print cumulative tags (includes weight from
|
||||
// children that have been deleted).
|
||||
// For internal nodes, print only flat tags.
|
||||
flatTags := len(node.Out) > 0
|
||||
|
||||
// Select the top maxNodelets alphanumeric labels by weight.
|
||||
SortTags(ts, flatTags)
|
||||
if len(ts) > maxNodelets {
|
||||
ts = ts[:maxNodelets]
|
||||
}
|
||||
for i, t := range ts {
|
||||
w := t.CumValue()
|
||||
if flatTags {
|
||||
w = t.FlatValue()
|
||||
}
|
||||
if w == 0 {
|
||||
continue
|
||||
}
|
||||
weight := b.config.FormatValue(w)
|
||||
nodelets += fmt.Sprintf(`N%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, weight)
|
||||
nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight)
|
||||
if nts := lnts[t.Name]; nts != nil {
|
||||
nodelets += b.numericNodelets(nts, maxNumNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i))
|
||||
}
|
||||
}
|
||||
|
||||
if nts := lnts[""]; nts != nil {
|
||||
nodelets += b.numericNodelets(nts, maxNumNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID))
|
||||
}
|
||||
|
||||
fmt.Fprint(b, nodelets)
|
||||
return nodelets != ""
|
||||
}
|
||||
|
||||
func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, source string) string {
|
||||
nodelets := ""
|
||||
|
||||
// Collapse numeric labels into maxNumNodelets buckets, of the form:
|
||||
// 1MB..2MB, 3MB..5MB, ...
|
||||
for j, t := range b.collapsedTags(nts, maxNumNodelets, flatTags) {
|
||||
w, attr := t.CumValue(), ` style="dotted"`
|
||||
if flatTags || t.FlatValue() == t.CumValue() {
|
||||
w, attr = t.FlatValue(), ""
|
||||
}
|
||||
if w != 0 {
|
||||
weight := b.config.FormatValue(w)
|
||||
nodelets += fmt.Sprintf(`N%s_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, weight)
|
||||
nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr)
|
||||
}
|
||||
}
|
||||
return nodelets
|
||||
}
|
||||
|
||||
// addEdge generates a graph edge in DOT format.
|
||||
func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) {
|
||||
var inline string
|
||||
if edge.Inline {
|
||||
inline = `\n (inline)`
|
||||
}
|
||||
w := b.config.FormatValue(edge.WeightValue())
|
||||
attr := fmt.Sprintf(`label=" %s%s"`, w, inline)
|
||||
if b.config.Total != 0 {
|
||||
// Note: edge.weight > b.config.Total is possible for profile diffs.
|
||||
if weight := 1 + int(min64(abs64(edge.WeightValue()*100/b.config.Total), 100)); weight > 1 {
|
||||
attr = fmt.Sprintf(`%s weight=%d`, attr, weight)
|
||||
}
|
||||
if width := 1 + int(min64(abs64(edge.WeightValue()*5/b.config.Total), 5)); width > 1 {
|
||||
attr = fmt.Sprintf(`%s penwidth=%d`, attr, width)
|
||||
}
|
||||
attr = fmt.Sprintf(`%s color="%s"`, attr,
|
||||
dotColor(float64(edge.WeightValue())/float64(abs64(b.config.Total)), false))
|
||||
}
|
||||
arrow := "->"
|
||||
if edge.Residual {
|
||||
arrow = "..."
|
||||
}
|
||||
tooltip := fmt.Sprintf(`"%s %s %s (%s)"`,
|
||||
edge.Src.Info.PrintableName(), arrow, edge.Dest.Info.PrintableName(), w)
|
||||
attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`, attr, tooltip, tooltip)
|
||||
|
||||
if edge.Residual {
|
||||
attr = attr + ` style="dotted"`
|
||||
}
|
||||
|
||||
if hasNodelets {
|
||||
// Separate children further if source has tags.
|
||||
attr = attr + " minlen=2"
|
||||
}
|
||||
|
||||
fmt.Fprintf(b, "N%d -> N%d [%s]\n", from, to, attr)
|
||||
}
|
||||
|
||||
// dotColor returns a color for the given score (between -1.0 and
|
||||
// 1.0), with -1.0 colored red, 0.0 colored grey, and 1.0 colored
|
||||
// green. If isBackground is true, then a light (low-saturation)
|
||||
// color is returned (suitable for use as a background color);
|
||||
// otherwise, a darker color is returned (suitable for use as a
|
||||
// foreground color).
|
||||
func dotColor(score float64, isBackground bool) string {
|
||||
// A float between 0.0 and 1.0, indicating the extent to which
|
||||
// colors should be shifted away from grey (to make positive and
|
||||
// negative values easier to distinguish, and to make more use of
|
||||
// the color range.)
|
||||
const shift = 0.7
|
||||
|
||||
// Saturation and value (in hsv colorspace) for background colors.
|
||||
const bgSaturation = 0.1
|
||||
const bgValue = 0.93
|
||||
|
||||
// Saturation and value (in hsv colorspace) for foreground colors.
|
||||
const fgSaturation = 1.0
|
||||
const fgValue = 0.7
|
||||
|
||||
// Choose saturation and value based on isBackground.
|
||||
var saturation float64
|
||||
var value float64
|
||||
if isBackground {
|
||||
saturation = bgSaturation
|
||||
value = bgValue
|
||||
} else {
|
||||
saturation = fgSaturation
|
||||
value = fgValue
|
||||
}
|
||||
|
||||
// Limit the score values to the range [-1.0, 1.0].
|
||||
score = math.Max(-1.0, math.Min(1.0, score))
|
||||
|
||||
// Reduce saturation near score=0 (so it is colored grey, rather than yellow).
|
||||
if math.Abs(score) < 0.2 {
|
||||
saturation *= math.Abs(score) / 0.2
|
||||
}
|
||||
|
||||
// Apply 'shift' to move scores away from 0.0 (grey).
|
||||
if score > 0.0 {
|
||||
score = math.Pow(score, (1.0 - shift))
|
||||
}
|
||||
if score < 0.0 {
|
||||
score = -math.Pow(-score, (1.0 - shift))
|
||||
}
|
||||
|
||||
var r, g, b float64 // red, green, blue
|
||||
if score < 0.0 {
|
||||
g = value
|
||||
r = value * (1 + saturation*score)
|
||||
} else {
|
||||
r = value
|
||||
g = value * (1 - saturation*score)
|
||||
}
|
||||
b = value * (1 - saturation)
|
||||
return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0))
|
||||
}
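To make the mapping concrete, here is a minimal standalone sketch (hypothetical file and helper name, not part of the vendored code) that reproduces dotColor's foreground settings and prints the colors at the ends of the scale:

// scorecolor_demo.go: hypothetical sketch mirroring dotColor's foreground
// mapping (shift=0.7, saturation=1.0, value=0.7).
package main

import (
    "fmt"
    "math"
)

func scoreColor(score float64) string {
    const shift, saturation, value = 0.7, 1.0, 0.7
    score = math.Max(-1.0, math.Min(1.0, score))
    sat := saturation
    if math.Abs(score) < 0.2 {
        sat *= math.Abs(score) / 0.2 // fade to grey near zero
    }
    if score > 0 {
        score = math.Pow(score, 1.0-shift)
    } else if score < 0 {
        score = -math.Pow(-score, 1.0-shift)
    }
    var r, g float64
    if score < 0 {
        g, r = value, value*(1+sat*score) // negative scores shade toward green
    } else {
        r, g = value, value*(1-sat*score) // positive scores shade toward red
    }
    b := value * (1 - sat)
    return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0))
}

func main() {
    fmt.Println(scoreColor(-1)) // #00b200 (green)
    fmt.Println(scoreColor(0))  // #b2b2b2 (grey)
    fmt.Println(scoreColor(1))  // #b20000 (red)
}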
|
||||
|
||||
// percentage computes the percentage of total of a value, and encodes
|
||||
// it as a string. At least two digits of precision are printed.
|
||||
func percentage(value, total int64) string {
|
||||
var ratio float64
|
||||
if total != 0 {
|
||||
ratio = math.Abs(float64(value)/float64(total)) * 100
|
||||
}
|
||||
switch {
|
||||
case math.Abs(ratio) >= 99.95 && math.Abs(ratio) <= 100.05:
|
||||
return " 100%"
|
||||
case math.Abs(ratio) >= 1.0:
|
||||
return fmt.Sprintf("%5.2f%%", ratio)
|
||||
default:
|
||||
return fmt.Sprintf("%5.2g%%", ratio)
|
||||
}
|
||||
}
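As a quick illustration of the three branches above, a hedged sketch of a table-driven test that could sit beside dotgraph_test.go in package graph (the test name is hypothetical; the expected strings follow from the format verbs used):

package graph

import "testing"

// Hypothetical spot checks for the unexported percentage helper.
func TestPercentageFormatting(t *testing.T) {
    for _, tc := range []struct {
        value, total int64
        want         string
    }{
        {25, 100, "25.00%"},   // >= 1%: two decimal places, width 5
        {1, 1000, "  0.1%"},   // < 1%: %g with two significant digits
        {1000, 1000, " 100%"}, // within 0.05 of 100%: fixed " 100%"
    } {
        if got := percentage(tc.value, tc.total); got != tc.want {
            t.Errorf("percentage(%d, %d) = %q, want %q", tc.value, tc.total, got, tc.want)
        }
    }
}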
|
||||
|
||||
func multilinePrintableName(info *NodeInfo) string {
|
||||
infoCopy := *info
|
||||
infoCopy.Name = strings.Replace(infoCopy.Name, "::", `\n`, -1)
|
||||
infoCopy.Name = strings.Replace(infoCopy.Name, ".", `\n`, -1)
|
||||
if infoCopy.File != "" {
|
||||
infoCopy.File = filepath.Base(infoCopy.File)
|
||||
}
|
||||
return strings.Join(infoCopy.NameComponents(), `\n`) + `\n`
|
||||
}
|
||||
|
||||
// collapsedTags trims and sorts a slice of tags.
|
||||
func (b *builder) collapsedTags(ts []*Tag, count int, flatTags bool) []*Tag {
|
||||
ts = SortTags(ts, flatTags)
|
||||
if len(ts) <= count {
|
||||
return ts
|
||||
}
|
||||
|
||||
tagGroups := make([][]*Tag, count)
|
||||
for i, t := range (ts)[:count] {
|
||||
tagGroups[i] = []*Tag{t}
|
||||
}
|
||||
for _, t := range (ts)[count:] {
|
||||
g, d := 0, tagDistance(t, tagGroups[0][0])
|
||||
for i := 1; i < count; i++ {
|
||||
if nd := tagDistance(t, tagGroups[i][0]); nd < d {
|
||||
g, d = i, nd
|
||||
}
|
||||
}
|
||||
tagGroups[g] = append(tagGroups[g], t)
|
||||
}
|
||||
|
||||
var nts []*Tag
|
||||
for _, g := range tagGroups {
|
||||
l, w, c := b.tagGroupLabel(g)
|
||||
nts = append(nts, &Tag{
|
||||
Name: l,
|
||||
Flat: w,
|
||||
Cum: c,
|
||||
})
|
||||
}
|
||||
return SortTags(nts, flatTags)
|
||||
}
|
||||
|
||||
func tagDistance(t, u *Tag) float64 {
|
||||
v, _ := measurement.Scale(u.Value, u.Unit, t.Unit)
|
||||
if v < float64(t.Value) {
|
||||
return float64(t.Value) - v
|
||||
}
|
||||
return v - float64(t.Value)
|
||||
}
|
||||
|
||||
func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) {
|
||||
formatTag := b.config.FormatTag
|
||||
if formatTag == nil {
|
||||
formatTag = measurement.Label
|
||||
}
|
||||
|
||||
if len(g) == 1 {
|
||||
t := g[0]
|
||||
return formatTag(t.Value, t.Unit), t.FlatValue(), t.CumValue()
|
||||
}
|
||||
min := g[0]
|
||||
max := g[0]
|
||||
df, f := min.FlatDiv, min.Flat
|
||||
dc, c := min.CumDiv, min.Cum
|
||||
for _, t := range g[1:] {
|
||||
if v, _ := measurement.Scale(t.Value, t.Unit, min.Unit); int64(v) < min.Value {
|
||||
min = t
|
||||
}
|
||||
if v, _ := measurement.Scale(t.Value, t.Unit, max.Unit); int64(v) > max.Value {
|
||||
max = t
|
||||
}
|
||||
f += t.Flat
|
||||
df += t.FlatDiv
|
||||
c += t.Cum
|
||||
dc += t.CumDiv
|
||||
}
|
||||
if df != 0 {
|
||||
f = f / df
|
||||
}
|
||||
if dc != 0 {
|
||||
c = c / dc
|
||||
}
|
||||
return formatTag(min.Value, min.Unit) + ".." + formatTag(max.Value, max.Unit), f, c
|
||||
}
|
||||
|
||||
func min64(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
282
src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,282 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/proftest"
|
||||
)
|
||||
|
||||
const path = "testdata/"
|
||||
|
||||
func TestComposeWithStandardGraph(t *testing.T) {
|
||||
g := baseGraph()
|
||||
a, c := baseAttrsAndConfig()
|
||||
|
||||
var buf bytes.Buffer
|
||||
ComposeDot(&buf, g, a, c)
|
||||
|
||||
want, err := ioutil.ReadFile(path + "compose1.dot")
|
||||
if err != nil {
|
||||
t.Fatalf("error reading test file: %v", err)
|
||||
}
|
||||
|
||||
compareGraphs(t, buf.Bytes(), want)
|
||||
}
|
||||
|
||||
func TestComposeWithNodeAttributesAndZeroFlat(t *testing.T) {
|
||||
g := baseGraph()
|
||||
a, c := baseAttrsAndConfig()
|
||||
|
||||
// Set NodeAttributes for Node 1.
|
||||
a.Nodes[g.Nodes[0]] = &DotNodeAttributes{
|
||||
Shape: "folder",
|
||||
Bold: true,
|
||||
Peripheries: 2,
|
||||
URL: "www.google.com",
|
||||
Formatter: func(ni *NodeInfo) string {
|
||||
return strings.ToUpper(ni.Name)
|
||||
},
|
||||
}
|
||||
|
||||
// Set Flat value to zero on Node 2.
|
||||
g.Nodes[1].Flat = 0
|
||||
|
||||
var buf bytes.Buffer
|
||||
ComposeDot(&buf, g, a, c)
|
||||
|
||||
want, err := ioutil.ReadFile(path + "compose2.dot")
|
||||
if err != nil {
|
||||
t.Fatalf("error reading test file: %v", err)
|
||||
}
|
||||
|
||||
compareGraphs(t, buf.Bytes(), want)
|
||||
}
|
||||
|
||||
func TestComposeWithTagsAndResidualEdge(t *testing.T) {
|
||||
g := baseGraph()
|
||||
a, c := baseAttrsAndConfig()
|
||||
|
||||
// Add tags to Node 1.
|
||||
g.Nodes[0].LabelTags["a"] = &Tag{
|
||||
Name: "tag1",
|
||||
Cum: 10,
|
||||
Flat: 10,
|
||||
}
|
||||
g.Nodes[0].NumericTags[""] = TagMap{
|
||||
"b": &Tag{
|
||||
Name: "tag2",
|
||||
Cum: 20,
|
||||
Flat: 20,
|
||||
Unit: "ms",
|
||||
},
|
||||
}
|
||||
|
||||
// Set edge to be Residual.
|
||||
g.Nodes[0].Out[g.Nodes[1]].Residual = true
|
||||
|
||||
var buf bytes.Buffer
|
||||
ComposeDot(&buf, g, a, c)
|
||||
|
||||
want, err := ioutil.ReadFile(path + "compose3.dot")
|
||||
if err != nil {
|
||||
t.Fatalf("error reading test file: %v", err)
|
||||
}
|
||||
|
||||
compareGraphs(t, buf.Bytes(), want)
|
||||
}
|
||||
|
||||
func TestComposeWithNestedTags(t *testing.T) {
|
||||
g := baseGraph()
|
||||
a, c := baseAttrsAndConfig()
|
||||
|
||||
// Add tags to Node 1.
|
||||
g.Nodes[0].LabelTags["tag1"] = &Tag{
|
||||
Name: "tag1",
|
||||
Cum: 10,
|
||||
Flat: 10,
|
||||
}
|
||||
g.Nodes[0].NumericTags["tag1"] = TagMap{
|
||||
"tag2": &Tag{
|
||||
Name: "tag2",
|
||||
Cum: 20,
|
||||
Flat: 20,
|
||||
Unit: "ms",
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
ComposeDot(&buf, g, a, c)
|
||||
|
||||
want, err := ioutil.ReadFile(path + "compose5.dot")
|
||||
if err != nil {
|
||||
t.Fatalf("error reading test file: %v", err)
|
||||
}
|
||||
|
||||
compareGraphs(t, buf.Bytes(), want)
|
||||
}
|
||||
|
||||
func TestComposeWithEmptyGraph(t *testing.T) {
|
||||
g := &Graph{}
|
||||
a, c := baseAttrsAndConfig()
|
||||
|
||||
var buf bytes.Buffer
|
||||
ComposeDot(&buf, g, a, c)
|
||||
|
||||
want, err := ioutil.ReadFile(path + "compose4.dot")
|
||||
if err != nil {
|
||||
t.Fatalf("error reading test file: %v", err)
|
||||
}
|
||||
|
||||
compareGraphs(t, buf.Bytes(), want)
|
||||
}
|
||||
|
||||
func baseGraph() *Graph {
|
||||
src := &Node{
|
||||
Info: NodeInfo{Name: "src"},
|
||||
Flat: 10,
|
||||
Cum: 25,
|
||||
In: make(EdgeMap),
|
||||
Out: make(EdgeMap),
|
||||
LabelTags: make(TagMap),
|
||||
NumericTags: make(map[string]TagMap),
|
||||
}
|
||||
dest := &Node{
|
||||
Info: NodeInfo{Name: "dest"},
|
||||
Flat: 15,
|
||||
Cum: 25,
|
||||
In: make(EdgeMap),
|
||||
Out: make(EdgeMap),
|
||||
LabelTags: make(TagMap),
|
||||
NumericTags: make(map[string]TagMap),
|
||||
}
|
||||
edge := &Edge{
|
||||
Src: src,
|
||||
Dest: dest,
|
||||
Weight: 10,
|
||||
}
|
||||
src.Out[dest] = edge
|
||||
src.In[src] = edge
|
||||
return &Graph{
|
||||
Nodes: Nodes{
|
||||
src,
|
||||
dest,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func baseAttrsAndConfig() (*DotAttributes, *DotConfig) {
|
||||
a := &DotAttributes{
|
||||
Nodes: make(map[*Node]*DotNodeAttributes),
|
||||
}
|
||||
c := &DotConfig{
|
||||
Title: "testtitle",
|
||||
Labels: []string{"label1", "label2"},
|
||||
Total: 100,
|
||||
FormatValue: func(v int64) string {
|
||||
return strconv.FormatInt(v, 10)
|
||||
},
|
||||
}
|
||||
return a, c
|
||||
}
|
||||
|
||||
func compareGraphs(t *testing.T, got, want []byte) {
|
||||
if string(got) != string(want) {
|
||||
d, err := proftest.Diff(got, want)
|
||||
if err != nil {
|
||||
t.Fatalf("error finding diff: %v", err)
|
||||
}
|
||||
t.Errorf("Compose incorrectly wrote %s", string(d))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultilinePrintableName(t *testing.T) {
|
||||
ni := &NodeInfo{
|
||||
Name: "test1.test2::test3",
|
||||
File: "src/file.cc",
|
||||
Address: 123,
|
||||
Lineno: 999,
|
||||
}
|
||||
|
||||
want := fmt.Sprintf(`%016x\ntest1\ntest2\ntest3\nfile.cc:999\n`, 123)
|
||||
if got := multilinePrintableName(ni); got != want {
|
||||
t.Errorf("multilinePrintableName(%#v) == %q, want %q", ni, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTagCollapse(t *testing.T) {
|
||||
|
||||
makeTag := func(name, unit string, value, flat, cum int64) *Tag {
|
||||
return &Tag{name, unit, value, flat, 0, cum, 0}
|
||||
}
|
||||
|
||||
tagSource := []*Tag{
|
||||
makeTag("12mb", "mb", 12, 100, 100),
|
||||
makeTag("1kb", "kb", 1, 1, 1),
|
||||
makeTag("1mb", "mb", 1, 1000, 1000),
|
||||
makeTag("2048mb", "mb", 2048, 1000, 1000),
|
||||
makeTag("1b", "b", 1, 100, 100),
|
||||
makeTag("2b", "b", 2, 100, 100),
|
||||
makeTag("7b", "b", 7, 100, 100),
|
||||
}
|
||||
|
||||
tagWant := [][]*Tag{
|
||||
[]*Tag{
|
||||
makeTag("1B..2GB", "", 0, 2401, 2401),
|
||||
},
|
||||
[]*Tag{
|
||||
makeTag("2GB", "", 0, 1000, 1000),
|
||||
makeTag("1B..12MB", "", 0, 1401, 1401),
|
||||
},
|
||||
[]*Tag{
|
||||
makeTag("2GB", "", 0, 1000, 1000),
|
||||
makeTag("12MB", "", 0, 100, 100),
|
||||
makeTag("1B..1MB", "", 0, 1301, 1301),
|
||||
},
|
||||
[]*Tag{
|
||||
makeTag("2GB", "", 0, 1000, 1000),
|
||||
makeTag("1MB", "", 0, 1000, 1000),
|
||||
makeTag("2B..1kB", "", 0, 201, 201),
|
||||
makeTag("1B", "", 0, 100, 100),
|
||||
makeTag("12MB", "", 0, 100, 100),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tagWant {
|
||||
var got, want []*Tag
|
||||
b := builder{nil, &DotAttributes{}, &DotConfig{}}
|
||||
got = b.collapsedTags(tagSource, len(tc), true)
|
||||
want = SortTags(tc, true)
|
||||
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("collapse to %d, got:\n%v\nwant:\n%v", len(tc), tagString(got), tagString(want))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func tagString(t []*Tag) string {
|
||||
var ret []string
|
||||
for _, s := range t {
|
||||
ret = append(ret, fmt.Sprintln(s))
|
||||
}
|
||||
return strings.Join(ret, ":")
|
||||
}
|
1134
src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
generated
vendored
Normal file
File diff suppressed because it is too large
314
src/cmd/vendor/github.com/google/pprof/internal/graph/graph_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,314 @@
|
|||
package graph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func edgeDebugString(edge *Edge) string {
|
||||
debug := ""
|
||||
debug += fmt.Sprintf("\t\tSrc: %p\n", edge.Src)
|
||||
debug += fmt.Sprintf("\t\tDest: %p\n", edge.Dest)
|
||||
debug += fmt.Sprintf("\t\tWeight: %d\n", edge.Weight)
|
||||
debug += fmt.Sprintf("\t\tResidual: %t\n", edge.Residual)
|
||||
debug += fmt.Sprintf("\t\tInline: %t\n", edge.Inline)
|
||||
return debug
|
||||
}
|
||||
|
||||
func edgeMapsDebugString(in, out EdgeMap) string {
|
||||
debug := ""
|
||||
debug += "In Edges:\n"
|
||||
for parent, edge := range in {
|
||||
debug += fmt.Sprintf("\tParent: %p\n", parent)
|
||||
debug += edgeDebugString(edge)
|
||||
}
|
||||
debug += "Out Edges:\n"
|
||||
for child, edge := range out {
|
||||
debug += fmt.Sprintf("\tChild: %p\n", child)
|
||||
debug += edgeDebugString(edge)
|
||||
}
|
||||
return debug
|
||||
}
|
||||
|
||||
func graphDebugString(graph *Graph) string {
|
||||
debug := ""
|
||||
for i, node := range graph.Nodes {
|
||||
debug += fmt.Sprintf("Node %d: %p\n", i, node)
|
||||
}
|
||||
|
||||
for i, node := range graph.Nodes {
|
||||
debug += "\n"
|
||||
debug += fmt.Sprintf("=== Node %d: %p ===\n", i, node)
|
||||
debug += edgeMapsDebugString(node.In, node.Out)
|
||||
}
|
||||
return debug
|
||||
}
|
||||
|
||||
func expectedNodesDebugString(expected []expectedNode) string {
|
||||
debug := ""
|
||||
for i, node := range expected {
|
||||
debug += fmt.Sprintf("Node %d: %p\n", i, node.node)
|
||||
}
|
||||
|
||||
for i, node := range expected {
|
||||
debug += "\n"
|
||||
debug += fmt.Sprintf("=== Node %d: %p ===\n", i, node.node)
|
||||
debug += edgeMapsDebugString(node.in, node.out)
|
||||
}
|
||||
return debug
|
||||
}
|
||||
|
||||
// edgeMapsEqual checks if all the edges in this equal all the edges in that.
|
||||
func edgeMapsEqual(this, that EdgeMap) bool {
|
||||
if len(this) != len(that) {
|
||||
return false
|
||||
}
|
||||
for node, thisEdge := range this {
|
||||
if *thisEdge != *that[node] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// nodesEqual checks if node is equal to expected.
|
||||
func nodesEqual(node *Node, expected expectedNode) bool {
|
||||
return node == expected.node && edgeMapsEqual(node.In, expected.in) &&
|
||||
edgeMapsEqual(node.Out, expected.out)
|
||||
}
|
||||
|
||||
// graphsEqual checks if graph is equivalent to the graph templated by expected.
|
||||
func graphsEqual(graph *Graph, expected []expectedNode) bool {
|
||||
if len(graph.Nodes) != len(expected) {
|
||||
return false
|
||||
}
|
||||
expectedSet := make(map[*Node]expectedNode)
|
||||
for i := range expected {
|
||||
expectedSet[expected[i].node] = expected[i]
|
||||
}
|
||||
|
||||
for _, node := range graph.Nodes {
|
||||
expectedNode, found := expectedSet[node]
|
||||
if !found || !nodesEqual(node, expectedNode) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type expectedNode struct {
|
||||
node *Node
|
||||
in, out EdgeMap
|
||||
}
|
||||
|
||||
type trimTreeTestcase struct {
|
||||
initial *Graph
|
||||
expected []expectedNode
|
||||
keep NodePtrSet
|
||||
}
|
||||
|
||||
// makeExpectedEdgeResidual makes the edge from parent to child residual.
|
||||
func makeExpectedEdgeResidual(parent, child expectedNode) {
|
||||
parent.out[child.node].Residual = true
|
||||
child.in[parent.node].Residual = true
|
||||
}
|
||||
|
||||
func makeEdgeInline(edgeMap EdgeMap, node *Node) {
|
||||
edgeMap[node].Inline = true
|
||||
}
|
||||
|
||||
func setEdgeWeight(edgeMap EdgeMap, node *Node, weight int64) {
|
||||
edgeMap[node].Weight = weight
|
||||
}
|
||||
|
||||
// createEdges creates directed edges from the parent to each of the children.
|
||||
func createEdges(parent *Node, children ...*Node) {
|
||||
for _, child := range children {
|
||||
edge := &Edge{
|
||||
Src: parent,
|
||||
Dest: child,
|
||||
}
|
||||
parent.Out[child] = edge
|
||||
child.In[parent] = edge
|
||||
}
|
||||
}
|
||||
|
||||
// createEmptyNode creates a node without any edges.
|
||||
func createEmptyNode() *Node {
|
||||
return &Node{
|
||||
In: make(EdgeMap),
|
||||
Out: make(EdgeMap),
|
||||
}
|
||||
}
|
||||
|
||||
// createExpectedNodes creates a slice of expectedNodes from nodes.
|
||||
func createExpectedNodes(nodes ...*Node) ([]expectedNode, NodePtrSet) {
|
||||
expected := make([]expectedNode, len(nodes))
|
||||
keep := make(NodePtrSet, len(nodes))
|
||||
|
||||
for i, node := range nodes {
|
||||
expected[i] = expectedNode{
|
||||
node: node,
|
||||
in: make(EdgeMap),
|
||||
out: make(EdgeMap),
|
||||
}
|
||||
keep[node] = true
|
||||
}
|
||||
|
||||
return expected, keep
|
||||
}
|
||||
|
||||
// createExpectedEdges creates directed edges from the parent to each of the
|
||||
// children.
|
||||
func createExpectedEdges(parent expectedNode, children ...expectedNode) {
|
||||
for _, child := range children {
|
||||
edge := &Edge{
|
||||
Src: parent.node,
|
||||
Dest: child.node,
|
||||
}
|
||||
parent.out[child.node] = edge
|
||||
child.in[parent.node] = edge
|
||||
}
|
||||
}
|
||||
|
||||
// createTestCase1 creates a test case that initially looks like:
|
||||
// 0
|
||||
// |(5)
|
||||
// 1
|
||||
// (3)/ \(4)
|
||||
// 2 3.
|
||||
//
|
||||
// After keeping 0, 2, and 3, it expects the graph:
|
||||
// 0
|
||||
// (3)/ \(4)
|
||||
// 2 3.
|
||||
func createTestCase1() trimTreeTestcase {
|
||||
// Create initial graph
|
||||
graph := &Graph{make(Nodes, 4)}
|
||||
nodes := graph.Nodes
|
||||
for i := range nodes {
|
||||
nodes[i] = createEmptyNode()
|
||||
}
|
||||
createEdges(nodes[0], nodes[1])
|
||||
createEdges(nodes[1], nodes[2], nodes[3])
|
||||
makeEdgeInline(nodes[0].Out, nodes[1])
|
||||
makeEdgeInline(nodes[1].Out, nodes[2])
|
||||
setEdgeWeight(nodes[0].Out, nodes[1], 5)
|
||||
setEdgeWeight(nodes[1].Out, nodes[2], 3)
|
||||
setEdgeWeight(nodes[1].Out, nodes[3], 4)
|
||||
|
||||
// Create expected graph
|
||||
expected, keep := createExpectedNodes(nodes[0], nodes[2], nodes[3])
|
||||
createExpectedEdges(expected[0], expected[1], expected[2])
|
||||
makeEdgeInline(expected[0].out, expected[1].node)
|
||||
makeExpectedEdgeResidual(expected[0], expected[1])
|
||||
makeExpectedEdgeResidual(expected[0], expected[2])
|
||||
setEdgeWeight(expected[0].out, expected[1].node, 3)
|
||||
setEdgeWeight(expected[0].out, expected[2].node, 4)
|
||||
return trimTreeTestcase{
|
||||
initial: graph,
|
||||
expected: expected,
|
||||
keep: keep,
|
||||
}
|
||||
}
|
||||
|
||||
// createTestCase2 creates a test case that initially looks like:
|
||||
// 3
|
||||
// | (12)
|
||||
// 1
|
||||
// | (8)
|
||||
// 2
|
||||
// | (15)
|
||||
// 0
|
||||
// | (10)
|
||||
// 4.
|
||||
//
|
||||
// After keeping 3 and 4, it expects the graph:
|
||||
// 3
|
||||
// | (10)
|
||||
// 4.
|
||||
func createTestCase2() trimTreeTestcase {
|
||||
// Create initial graph
|
||||
graph := &Graph{make(Nodes, 5)}
|
||||
nodes := graph.Nodes
|
||||
for i := range nodes {
|
||||
nodes[i] = createEmptyNode()
|
||||
}
|
||||
createEdges(nodes[3], nodes[1])
|
||||
createEdges(nodes[1], nodes[2])
|
||||
createEdges(nodes[2], nodes[0])
|
||||
createEdges(nodes[0], nodes[4])
|
||||
setEdgeWeight(nodes[3].Out, nodes[1], 12)
|
||||
setEdgeWeight(nodes[1].Out, nodes[2], 8)
|
||||
setEdgeWeight(nodes[2].Out, nodes[0], 15)
|
||||
setEdgeWeight(nodes[0].Out, nodes[4], 10)
|
||||
|
||||
// Create expected graph
|
||||
expected, keep := createExpectedNodes(nodes[3], nodes[4])
|
||||
createExpectedEdges(expected[0], expected[1])
|
||||
makeExpectedEdgeResidual(expected[0], expected[1])
|
||||
setEdgeWeight(expected[0].out, expected[1].node, 10)
|
||||
return trimTreeTestcase{
|
||||
initial: graph,
|
||||
expected: expected,
|
||||
keep: keep,
|
||||
}
|
||||
}
|
||||
|
||||
// createTestCase3 creates an initially empty graph and expects an empty graph
|
||||
// after trimming.
|
||||
func createTestCase3() trimTreeTestcase {
|
||||
graph := &Graph{make(Nodes, 0)}
|
||||
expected, keep := createExpectedNodes()
|
||||
return trimTreeTestcase{
|
||||
initial: graph,
|
||||
expected: expected,
|
||||
keep: keep,
|
||||
}
|
||||
}
|
||||
|
||||
// createTestCase4 creates a test case that initially looks like:
|
||||
// 0.
|
||||
//
|
||||
// After keeping 0, it expects the graph:
|
||||
// 0.
|
||||
func createTestCase4() trimTreeTestcase {
|
||||
graph := &Graph{make(Nodes, 1)}
|
||||
nodes := graph.Nodes
|
||||
for i := range nodes {
|
||||
nodes[i] = createEmptyNode()
|
||||
}
|
||||
expected, keep := createExpectedNodes(nodes[0])
|
||||
return trimTreeTestcase{
|
||||
initial: graph,
|
||||
expected: expected,
|
||||
keep: keep,
|
||||
}
|
||||
}
|
||||
|
||||
func createTrimTreeTestCases() []trimTreeTestcase {
|
||||
caseGenerators := []func() trimTreeTestcase{
|
||||
createTestCase1,
|
||||
createTestCase2,
|
||||
createTestCase3,
|
||||
createTestCase4,
|
||||
}
|
||||
cases := make([]trimTreeTestcase, len(caseGenerators))
|
||||
for i, gen := range caseGenerators {
|
||||
cases[i] = gen()
|
||||
}
|
||||
return cases
|
||||
}
|
||||
|
||||
func TestTrimTree(t *testing.T) {
|
||||
tests := createTrimTreeTestCases()
|
||||
for _, test := range tests {
|
||||
graph := test.initial
|
||||
graph.TrimTree(test.keep)
|
||||
if !graphsEqual(graph, test.expected) {
|
||||
t.Fatalf("Graphs do not match.\nExpected: %s\nFound: %s\n",
|
||||
expectedNodesDebugString(test.expected),
|
||||
graphDebugString(graph))
|
||||
}
|
||||
}
|
||||
}
|
7
src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose1.dot
generated
vendored
Normal file
|
@@ -0,0 +1,7 @@
|
|||
digraph "testtitle" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] }
|
||||
N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"]
|
||||
}
|
7
src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose2.dot
generated
vendored
Normal file
|
@@ -0,0 +1,7 @@
|
|||
digraph "testtitle" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] }
|
||||
N1 [label="SRC10 (10.00%)\nof 25 (25.00%)" fontsize=24 shape=folder tooltip="src (25)" color="#b23c00" fillcolor="#edddd5" style="bold,filled" peripheries=2 URL="www.google.com" target="_blank"]
|
||||
N2 [label="dest\n0 of 25 (25.00%)" fontsize=8 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"]
|
||||
}
|
11
src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose3.dot
generated
vendored
Normal file
|
@@ -0,0 +1,11 @@
|
|||
digraph "testtitle" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] }
|
||||
N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1_0 [label = "tag1" fontsize=8 shape=box3d tooltip="10"]
|
||||
N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"]
|
||||
NN1_0 [label = "tag2" fontsize=8 shape=box3d tooltip="20"]
|
||||
N1 -> NN1_0 [label=" 20" weight=100 tooltip="20" labeltooltip="20"]
|
||||
N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src ... dest (10)" labeltooltip="src ... dest (10)" style="dotted" minlen=2]
|
||||
}
|
4
src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose4.dot
generated
vendored
Normal file
|
@@ -0,0 +1,4 @@
|
|||
digraph "testtitle" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] }
|
||||
}
|
11
src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose5.dot
generated
vendored
Normal file
|
@@ -0,0 +1,11 @@
|
|||
digraph "testtitle" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] }
|
||||
N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1_0 [label = "tag1" fontsize=8 shape=box3d tooltip="10"]
|
||||
N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"]
|
||||
NN1_0_0 [label = "tag2" fontsize=8 shape=box3d tooltip="20"]
|
||||
N1_0 -> NN1_0_0 [label=" 20" weight=100 tooltip="20" labeltooltip="20"]
|
||||
N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"]
|
||||
N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)" minlen=2]
|
||||
}
|
299
src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
generated
vendored
Normal file
|
@@ -0,0 +1,299 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package measurement exports utility functions to manipulate/format performance profile sample values.
|
||||
package measurement
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
// ScaleProfiles updates the units in a set of profiles to make them
|
||||
// compatible. It scales the profiles to the smallest unit to preserve
|
||||
// data.
|
||||
func ScaleProfiles(profiles []*profile.Profile) error {
|
||||
if len(profiles) == 0 {
|
||||
return nil
|
||||
}
|
||||
periodTypes := make([]*profile.ValueType, 0, len(profiles))
|
||||
for _, p := range profiles {
|
||||
if p.PeriodType != nil {
|
||||
periodTypes = append(periodTypes, p.PeriodType)
|
||||
}
|
||||
}
|
||||
periodType, err := CommonValueType(periodTypes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("period type: %v", err)
|
||||
}
|
||||
|
||||
// Identify common sample types
|
||||
numSampleTypes := len(profiles[0].SampleType)
|
||||
for _, p := range profiles[1:] {
|
||||
if numSampleTypes != len(p.SampleType) {
|
||||
return fmt.Errorf("inconsistent samples type count: %d != %d", numSampleTypes, len(p.SampleType))
|
||||
}
|
||||
}
|
||||
sampleType := make([]*profile.ValueType, numSampleTypes)
|
||||
for i := 0; i < numSampleTypes; i++ {
|
||||
sampleTypes := make([]*profile.ValueType, len(profiles))
|
||||
for j, p := range profiles {
|
||||
sampleTypes[j] = p.SampleType[i]
|
||||
}
|
||||
sampleType[i], err = CommonValueType(sampleTypes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sample types: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range profiles {
|
||||
if p.PeriodType != nil && periodType != nil {
|
||||
period, _ := Scale(p.Period, p.PeriodType.Unit, periodType.Unit)
|
||||
p.Period, p.PeriodType.Unit = int64(period), periodType.Unit
|
||||
}
|
||||
ratios := make([]float64, len(p.SampleType))
|
||||
for i, st := range p.SampleType {
|
||||
if sampleType[i] == nil {
|
||||
ratios[i] = 1
|
||||
continue
|
||||
}
|
||||
ratios[i], _ = Scale(1, st.Unit, sampleType[i].Unit)
|
||||
p.SampleType[i].Unit = sampleType[i].Unit
|
||||
}
|
||||
if err := p.ScaleN(ratios); err != nil {
|
||||
return fmt.Errorf("scale: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
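A hedged example of ScaleProfiles in practice; this sketch assumes a _test.go file inside the (internal) measurement package and only sets the sample-type units, which is all that matters here:

package measurement

import (
    "fmt"

    "github.com/google/pprof/profile"
)

// Hypothetical example: profiles sampled in milliseconds and nanoseconds are
// rescaled so that both use the finer unit.
func ExampleScaleProfiles() {
    p1 := &profile.Profile{SampleType: []*profile.ValueType{{Type: "cpu", Unit: "milliseconds"}}}
    p2 := &profile.Profile{SampleType: []*profile.ValueType{{Type: "cpu", Unit: "nanoseconds"}}}
    if err := ScaleProfiles([]*profile.Profile{p1, p2}); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(p1.SampleType[0].Unit, p2.SampleType[0].Unit)
    // Output: nanoseconds nanoseconds
}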
|
||||
|
||||
// CommonValueType returns the finest type from a set of compatible
|
||||
// types.
|
||||
func CommonValueType(ts []*profile.ValueType) (*profile.ValueType, error) {
|
||||
if len(ts) <= 1 {
|
||||
return nil, nil
|
||||
}
|
||||
minType := ts[0]
|
||||
for _, t := range ts[1:] {
|
||||
if !compatibleValueTypes(minType, t) {
|
||||
return nil, fmt.Errorf("incompatible types: %v %v", *minType, *t)
|
||||
}
|
||||
if ratio, _ := Scale(1, t.Unit, minType.Unit); ratio < 1 {
|
||||
minType = t
|
||||
}
|
||||
}
|
||||
rcopy := *minType
|
||||
return &rcopy, nil
|
||||
}
|
||||
|
||||
func compatibleValueTypes(v1, v2 *profile.ValueType) bool {
|
||||
if v1 == nil || v2 == nil {
|
||||
return true // No grounds to disqualify.
|
||||
}
|
||||
// Remove trailing 's' to permit minor mismatches.
|
||||
if t1, t2 := strings.TrimSuffix(v1.Type, "s"), strings.TrimSuffix(v2.Type, "s"); t1 != t2 {
|
||||
return false
|
||||
}
|
||||
|
||||
return v1.Unit == v2.Unit ||
|
||||
(isTimeUnit(v1.Unit) && isTimeUnit(v2.Unit)) ||
|
||||
(isMemoryUnit(v1.Unit) && isMemoryUnit(v2.Unit))
|
||||
}
|
||||
|
||||
// Scale a measurement from one unit to a different unit and returns
|
||||
// the scaled value and the target unit. The returned target unit
|
||||
// will be empty if uninteresting (could be skipped).
|
||||
func Scale(value int64, fromUnit, toUnit string) (float64, string) {
|
||||
// Avoid infinite recursion on overflow.
|
||||
if value < 0 && -value > 0 {
|
||||
v, u := Scale(-value, fromUnit, toUnit)
|
||||
return -v, u
|
||||
}
|
||||
if m, u, ok := memoryLabel(value, fromUnit, toUnit); ok {
|
||||
return m, u
|
||||
}
|
||||
if t, u, ok := timeLabel(value, fromUnit, toUnit); ok {
|
||||
return t, u
|
||||
}
|
||||
// Skip non-interesting units.
|
||||
switch toUnit {
|
||||
case "count", "sample", "unit", "minimum", "auto":
|
||||
return float64(value), ""
|
||||
default:
|
||||
return float64(value), toUnit
|
||||
}
|
||||
}
|
||||
|
||||
// Label returns the label used to describe a certain measurement.
|
||||
func Label(value int64, unit string) string {
|
||||
return ScaledLabel(value, unit, "auto")
|
||||
}
|
||||
|
||||
// ScaledLabel scales the passed-in measurement (if necessary) and
|
||||
// returns the label used to describe a float measurement.
|
||||
func ScaledLabel(value int64, fromUnit, toUnit string) string {
|
||||
v, u := Scale(value, fromUnit, toUnit)
|
||||
sv := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00")
|
||||
if sv == "0" || sv == "-0" {
|
||||
return "0"
|
||||
}
|
||||
return sv + u
|
||||
}
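For a rough feel of the output, a hypothetical example function (it would have to live inside the pprof tree, since the package is internal); the expected strings follow from the memory and time rules implemented below:

package measurement

import "fmt"

// Hypothetical demo of Label and ScaledLabel.
func ExampleLabel() {
    fmt.Println(Label(2*1024*1024, "bytes"))                 // memory values pick MB automatically
    fmt.Println(Label(1500, "milliseconds"))                 // time values pick seconds automatically
    fmt.Println(ScaledLabel(2048, "kilobytes", "megabytes")) // explicit target unit
    // Output:
    // 2MB
    // 1.50s
    // 2MB
}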
|
||||
|
||||
// isMemoryUnit returns whether a name is recognized as a memory size
|
||||
// unit.
|
||||
func isMemoryUnit(unit string) bool {
|
||||
switch strings.TrimSuffix(strings.ToLower(unit), "s") {
|
||||
case "byte", "b", "kilobyte", "kb", "megabyte", "mb", "gigabyte", "gb":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
|
||||
fromUnit = strings.TrimSuffix(strings.ToLower(fromUnit), "s")
|
||||
toUnit = strings.TrimSuffix(strings.ToLower(toUnit), "s")
|
||||
|
||||
switch fromUnit {
|
||||
case "byte", "b":
|
||||
case "kilobyte", "kb":
|
||||
value *= 1024
|
||||
case "megabyte", "mb":
|
||||
value *= 1024 * 1024
|
||||
case "gigabyte", "gb":
|
||||
value *= 1024 * 1024 * 1024
|
||||
default:
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
if toUnit == "minimum" || toUnit == "auto" {
|
||||
switch {
|
||||
case value < 1024:
|
||||
toUnit = "b"
|
||||
case value < 1024*1024:
|
||||
toUnit = "kb"
|
||||
case value < 1024*1024*1024:
|
||||
toUnit = "mb"
|
||||
default:
|
||||
toUnit = "gb"
|
||||
}
|
||||
}
|
||||
|
||||
var output float64
|
||||
switch toUnit {
|
||||
default:
|
||||
output, toUnit = float64(value), "B"
|
||||
case "kb", "kbyte", "kilobyte":
|
||||
output, toUnit = float64(value)/1024, "kB"
|
||||
case "mb", "mbyte", "megabyte":
|
||||
output, toUnit = float64(value)/(1024*1024), "MB"
|
||||
case "gb", "gbyte", "gigabyte":
|
||||
output, toUnit = float64(value)/(1024*1024*1024), "GB"
|
||||
}
|
||||
return output, toUnit, true
|
||||
}
|
||||
|
||||
// isTimeUnit returns whether a name is recognized as a time unit.
|
||||
func isTimeUnit(unit string) bool {
|
||||
unit = strings.ToLower(unit)
|
||||
if len(unit) > 2 {
|
||||
unit = strings.TrimSuffix(unit, "s")
|
||||
}
|
||||
|
||||
switch unit {
|
||||
case "nanosecond", "ns", "microsecond", "millisecond", "ms", "s", "second", "sec", "hr", "day", "week", "year":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
|
||||
fromUnit = strings.ToLower(fromUnit)
|
||||
if len(fromUnit) > 2 {
|
||||
fromUnit = strings.TrimSuffix(fromUnit, "s")
|
||||
}
|
||||
|
||||
toUnit = strings.ToLower(toUnit)
|
||||
if len(toUnit) > 2 {
|
||||
toUnit = strings.TrimSuffix(toUnit, "s")
|
||||
}
|
||||
|
||||
var d time.Duration
|
||||
switch fromUnit {
|
||||
case "nanosecond", "ns":
|
||||
d = time.Duration(value) * time.Nanosecond
|
||||
case "microsecond":
|
||||
d = time.Duration(value) * time.Microsecond
|
||||
case "millisecond", "ms":
|
||||
d = time.Duration(value) * time.Millisecond
|
||||
case "second", "sec", "s":
|
||||
d = time.Duration(value) * time.Second
|
||||
case "cycle":
|
||||
return float64(value), "", true
|
||||
default:
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
if toUnit == "minimum" || toUnit == "auto" {
|
||||
switch {
|
||||
case d < 1*time.Microsecond:
|
||||
toUnit = "ns"
|
||||
case d < 1*time.Millisecond:
|
||||
toUnit = "us"
|
||||
case d < 1*time.Second:
|
||||
toUnit = "ms"
|
||||
case d < 1*time.Minute:
|
||||
toUnit = "sec"
|
||||
case d < 1*time.Hour:
|
||||
toUnit = "min"
|
||||
case d < 24*time.Hour:
|
||||
toUnit = "hour"
|
||||
case d < 15*24*time.Hour:
|
||||
toUnit = "day"
|
||||
case d < 120*24*time.Hour:
|
||||
toUnit = "week"
|
||||
default:
|
||||
toUnit = "year"
|
||||
}
|
||||
}
|
||||
|
||||
var output float64
|
||||
dd := float64(d)
|
||||
switch toUnit {
|
||||
case "ns", "nanosecond":
|
||||
output, toUnit = dd/float64(time.Nanosecond), "ns"
|
||||
case "us", "microsecond":
|
||||
output, toUnit = dd/float64(time.Microsecond), "us"
|
||||
case "ms", "millisecond":
|
||||
output, toUnit = dd/float64(time.Millisecond), "ms"
|
||||
case "min", "minute":
|
||||
output, toUnit = dd/float64(time.Minute), "mins"
|
||||
case "hour", "hr":
|
||||
output, toUnit = dd/float64(time.Hour), "hrs"
|
||||
case "day":
|
||||
output, toUnit = dd/float64(24*time.Hour), "days"
|
||||
case "week", "wk":
|
||||
output, toUnit = dd/float64(7*24*time.Hour), "wks"
|
||||
case "year", "yr":
|
||||
output, toUnit = dd/float64(365*24*time.Hour), "yrs" // 365 days per year
|
||||
default:
|
||||
fallthrough
|
||||
case "sec", "second", "s":
|
||||
output, toUnit = dd/float64(time.Second), "s"
|
||||
}
|
||||
return output, toUnit, true
|
||||
}
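A few hedged spot checks of Scale itself, written as a hypothetical test beside measurement.go:

package measurement

import "testing"

// Hypothetical table test covering the memory, time and fallthrough paths.
func TestScaleSpotChecks(t *testing.T) {
    for _, tc := range []struct {
        value    int64
        from, to string
        wantV    float64
        wantU    string
    }{
        {2048, "kb", "auto", 2, "MB"},  // memory: auto picks megabytes
        {1500, "ms", "auto", 1.5, "s"}, // time: auto picks seconds
        {3, "count", "minimum", 3, ""}, // uninteresting units return an empty unit
    } {
        if v, u := Scale(tc.value, tc.from, tc.to); v != tc.wantV || u != tc.wantU {
            t.Errorf("Scale(%d, %q, %q) = (%v, %q), want (%v, %q)",
                tc.value, tc.from, tc.to, v, u, tc.wantV, tc.wantU)
        }
    }
}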
|
|
@@ -1,21 +1,44 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package plugin defines the plugin implementations that the main pprof driver requires.
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"internal/pprof/profile"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
// Options groups all the optional plugins into pprof.
|
||||
type Options struct {
|
||||
Writer Writer
|
||||
Flagset FlagSet
|
||||
Fetch Fetcher
|
||||
Sym Symbolizer
|
||||
Obj ObjTool
|
||||
UI UI
|
||||
}
|
||||
|
||||
// Writer provides a mechanism to write data under a certain name,
|
||||
// typically a filename.
|
||||
type Writer interface {
|
||||
Open(name string) (io.WriteCloser, error)
|
||||
}
|
||||
|
||||
// A FlagSet creates and parses command-line flags.
|
||||
// It is similar to the standard flag.FlagSet.
|
||||
type FlagSet interface {
|
||||
|
@@ -26,6 +49,17 @@ type FlagSet interface {
|
|||
Float64(name string, def float64, usage string) *float64
|
||||
String(name string, def string, usage string) *string
|
||||
|
||||
// BoolVar, IntVar, Float64Var, and StringVar define new flags referencing
|
||||
// a given pointer, like the functions of the same name in package flag.
|
||||
BoolVar(pointer *bool, name string, def bool, usage string)
|
||||
IntVar(pointer *int, name string, def int, usage string)
|
||||
Float64Var(pointer *float64, name string, def float64, usage string)
|
||||
StringVar(pointer *string, name string, def string, usage string)
|
||||
|
||||
// StringList is similar to String but allows multiple values for a
|
||||
// single flag
|
||||
StringList(name string, def string, usage string) *[]*string
|
||||
|
||||
// ExtraUsage returns any additional text that should be
|
||||
// printed after the standard usage message.
|
||||
// The typical use of ExtraUsage is to show any custom flags
|
||||
|
@@ -39,51 +73,48 @@
|
|||
Parse(usage func()) []string
|
||||
}
|
||||
|
||||
// A Fetcher reads and returns the profile named by src. src can be a
|
||||
// local file path or a URL. duration and timeout are units specified
|
||||
// by the end user, or 0 by default. duration refers to the length of
|
||||
// the profile collection, if applicable, and timeout is the amount of
|
||||
// time to wait for a profile before returning an error. Returns the
|
||||
// fetched profile, the URL of the actual source of the profile, or an
|
||||
// error.
|
||||
type Fetcher interface {
|
||||
Fetch(src string, duration, timeout time.Duration) (*profile.Profile, string, error)
|
||||
}
|
||||
|
||||
// A Symbolizer introduces symbol information into a profile.
|
||||
type Symbolizer interface {
|
||||
Symbolize(mode string, srcs MappingSources, prof *profile.Profile) error
|
||||
}
|
||||
|
||||
// MappingSources map each profile.Mapping to the source of the profile.
|
||||
// The key is either Mapping.File or Mapping.BuildId.
|
||||
type MappingSources map[string][]struct {
|
||||
Source string // URL of the source the mapping was collected from
|
||||
Start uint64 // delta applied to addresses from this source (to represent Merge adjustments)
|
||||
}
|
||||
|
||||
// An ObjTool inspects shared libraries and executable files.
|
||||
type ObjTool interface {
|
||||
// Open opens the named object file.
|
||||
// If the object is a shared library, start is the address where
|
||||
// it is mapped into memory in the address space being inspected.
|
||||
Open(file string, start uint64) (ObjFile, error)
|
||||
|
||||
// Demangle translates a batch of symbol names from mangled
|
||||
// form to human-readable form.
|
||||
Demangle(names []string) (map[string]string, error)
|
||||
// Open opens the named object file. If the object is a shared
|
||||
// library, start/limit/offset are the addresses where it is mapped
|
||||
// into memory in the address space being inspected.
|
||||
Open(file string, start, limit, offset uint64) (ObjFile, error)
|
||||
|
||||
// Disasm disassembles the named object file, starting at
|
||||
// the start address and stopping at (before) the end address.
|
||||
Disasm(file string, start, end uint64) ([]Inst, error)
|
||||
|
||||
// SetConfig configures the tool.
|
||||
// The implementation defines the meaning of the string
|
||||
// and can ignore it entirely.
|
||||
SetConfig(config string)
|
||||
}
|
||||
|
||||
// NoObjTool returns a trivial implementation of the ObjTool interface.
|
||||
// Open returns an error indicating that the requested file does not exist.
|
||||
// Demangle returns an empty map and a nil error.
|
||||
// Disasm returns an error.
|
||||
// SetConfig is a no-op.
|
||||
func NoObjTool() ObjTool {
|
||||
return noObjTool{}
|
||||
}
|
||||
|
||||
type noObjTool struct{}
|
||||
|
||||
func (noObjTool) Open(file string, start uint64) (ObjFile, error) {
|
||||
return nil, &os.PathError{Op: "open", Path: file, Err: os.ErrNotExist}
|
||||
}
|
||||
|
||||
func (noObjTool) Demangle(name []string) (map[string]string, error) {
|
||||
return make(map[string]string), nil
|
||||
}
|
||||
|
||||
func (noObjTool) Disasm(file string, start, end uint64) ([]Inst, error) {
|
||||
return nil, fmt.Errorf("disassembly not supported")
|
||||
}
|
||||
|
||||
func (noObjTool) SetConfig(config string) {
|
||||
// An Inst is a single instruction in an assembly listing.
|
||||
type Inst struct {
|
||||
Addr uint64 // virtual address of instruction
|
||||
Text string // instruction text
|
||||
Function string // function name
|
||||
File string // source file
|
||||
Line int // source line
|
||||
}
|
||||
|
||||
// An ObjFile is a single object file: a shared library or executable.
|
||||
|
@@ -129,18 +160,11 @@ type Sym struct {
|
|||
End uint64 // virtual address of last byte in sym (Start+size-1)
|
||||
}
|
||||
|
||||
// An Inst is a single instruction in an assembly listing.
|
||||
type Inst struct {
|
||||
Addr uint64 // virtual address of instruction
|
||||
Text string // instruction text
|
||||
File string // source file
|
||||
Line int // source line
|
||||
}
|
||||
|
||||
// A UI manages user interactions.
|
||||
type UI interface {
|
||||
// Read returns a line of text (a command) read from the user.
|
||||
ReadLine() (string, error)
|
||||
// prompt is printed before reading the command.
|
||||
ReadLine(prompt string) (string, error)
|
||||
|
||||
// Print shows a message to the user.
|
||||
// It formats the text as fmt.Print would and adds a final \n if not already present.
|
||||
|
@@ -161,53 +185,3 @@
|
|||
// the auto-completion of cmd, if the UI supports auto-completion at all.
|
||||
SetAutoComplete(complete func(string) string)
|
||||
}
|
||||
|
||||
// StandardUI returns a UI that reads from standard input,
|
||||
// prints messages to standard output,
|
||||
// prints errors to standard error, and doesn't use auto-completion.
|
||||
func StandardUI() UI {
|
||||
return &stdUI{r: bufio.NewReader(os.Stdin)}
|
||||
}
|
||||
|
||||
type stdUI struct {
|
||||
r *bufio.Reader
|
||||
}
|
||||
|
||||
func (ui *stdUI) ReadLine() (string, error) {
|
||||
os.Stdout.WriteString("(pprof) ")
|
||||
return ui.r.ReadString('\n')
|
||||
}
|
||||
|
||||
func (ui *stdUI) Print(args ...interface{}) {
|
||||
ui.fprint(os.Stderr, args)
|
||||
}
|
||||
|
||||
func (ui *stdUI) PrintErr(args ...interface{}) {
|
||||
ui.fprint(os.Stderr, args)
|
||||
}
|
||||
|
||||
func (ui *stdUI) IsTerminal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (ui *stdUI) SetAutoComplete(func(string) string) {
|
||||
}
|
||||
|
||||
func (ui *stdUI) fprint(f *os.File, args []interface{}) {
|
||||
text := fmt.Sprint(args...)
|
||||
if !strings.HasSuffix(text, "\n") {
|
||||
text += "\n"
|
||||
}
|
||||
f.WriteString(text)
|
||||
}
|
||||
|
||||
// A Fetcher reads and returns the profile named by src.
|
||||
// It gives up after the given timeout, unless src contains a timeout override
|
||||
// (as defined by the implementation).
|
||||
// It can print messages to ui.
|
||||
type Fetcher func(src string, timeout time.Duration, ui UI) (*profile.Profile, error)
|
||||
|
||||
// A Symbolizer annotates a profile with symbol information.
|
||||
// The profile was fetch from src.
|
||||
// The meaning of mode is defined by the implementation.
|
||||
type Symbolizer func(mode, src string, prof *profile.Profile, obj ObjTool, ui UI) error
|
106
src/cmd/vendor/github.com/google/pprof/internal/proftest/proftest.go
generated
vendored
Normal file
|
@@ -0,0 +1,106 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package proftest provides some utility routines to test other
|
||||
// packages related to profiles.
|
||||
package proftest
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Diff compares two byte arrays using the diff tool to highlight the
|
||||
// differences. It is meant for testing purposes to display the
|
||||
// differences between expected and actual output.
|
||||
func Diff(b1, b2 []byte) (data []byte, err error) {
|
||||
f1, err := ioutil.TempFile("", "proto_test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(f1.Name())
|
||||
defer f1.Close()
|
||||
|
||||
f2, err := ioutil.TempFile("", "proto_test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(f2.Name())
|
||||
defer f2.Close()
|
||||
|
||||
f1.Write(b1)
|
||||
f2.Write(b2)
|
||||
|
||||
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
|
||||
if len(data) > 0 {
|
||||
// diff exits with a non-zero status when the files don't match.
|
||||
// Ignore that failure as long as we get output.
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
data = []byte(fmt.Sprintf("diff failed: %v\nb1: %q\nb2: %q\n", err, b1, b2))
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeJSON encodes a value into a byte array. This is intended for
|
||||
// testing purposes.
|
||||
func EncodeJSON(x interface{}) []byte {
|
||||
data, err := json.MarshalIndent(x, "", " ")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
data = append(data, '\n')
|
||||
return data
|
||||
}
|
||||
|
||||
// TestUI implements the plugin.UI interface, triggering test failures
|
||||
// if more than Ignore errors are printed.
|
||||
type TestUI struct {
|
||||
T *testing.T
|
||||
Ignore int
|
||||
}
|
||||
|
||||
// ReadLine returns no input, as no input is expected during testing.
|
||||
func (ui *TestUI) ReadLine(_ string) (string, error) {
|
||||
return "", fmt.Errorf("no input")
|
||||
}
|
||||
|
||||
// Print messages are discarded by the test UI.
|
||||
func (ui *TestUI) Print(args ...interface{}) {
|
||||
}
|
||||
|
||||
// PrintErr messages may trigger an error failure. A fixed number of
|
||||
// error messages are permitted when appropriate.
|
||||
func (ui *TestUI) PrintErr(args ...interface{}) {
|
||||
if ui.Ignore > 0 {
|
||||
ui.Ignore--
|
||||
return
|
||||
}
|
||||
ui.T.Error(args)
|
||||
}
|
||||
|
||||
// IsTerminal indicates if the UI is an interactive terminal.
|
||||
func (ui *TestUI) IsTerminal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// SetAutoComplete is not supported by the test UI.
|
||||
func (ui *TestUI) SetAutoComplete(_ func(string) string) {
|
||||
}
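A hedged sketch of how TestUI is meant to be wired into a test (hypothetical package and test names; it assumes the internal proftest package is importable, i.e. the code lives inside the pprof tree):

package driverdemo_test

import (
    "testing"

    "github.com/google/pprof/internal/proftest"
)

// Hypothetical use of TestUI: tolerate a known number of error messages and
// fail the test on any extra ones.
func TestUIIgnoresExpectedErrors(t *testing.T) {
    ui := &proftest.TestUI{T: t, Ignore: 2}
    ui.PrintErr("first expected warning")  // ignored, Ignore drops to 1
    ui.PrintErr("second expected warning") // ignored, Ignore drops to 0
    // A third PrintErr call would now invoke t.Error and fail the test.
}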
|
1167
src/cmd/vendor/github.com/google/pprof/internal/report/report.go
generated
vendored
Normal file
File diff suppressed because it is too large
266
src/cmd/vendor/github.com/google/pprof/internal/report/report_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,266 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package report
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/binutils"
|
||||
"github.com/google/pprof/internal/graph"
|
||||
"github.com/google/pprof/internal/proftest"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
type testcase struct {
|
||||
rpt *Report
|
||||
want string
|
||||
}
|
||||
|
||||
func TestSource(t *testing.T) {
|
||||
const path = "testdata/"
|
||||
|
||||
sampleValue1 := func(v []int64) int64 {
|
||||
return v[1]
|
||||
}
|
||||
|
||||
for _, tc := range []testcase{
|
||||
{
|
||||
rpt: New(
|
||||
testProfile.Copy(),
|
||||
&Options{
|
||||
OutputFormat: List,
|
||||
Symbol: regexp.MustCompile(`.`),
|
||||
|
||||
SampleValue: sampleValue1,
|
||||
SampleUnit: testProfile.SampleType[1].Unit,
|
||||
},
|
||||
),
|
||||
want: path + "source.rpt",
|
||||
},
|
||||
{
|
||||
rpt: New(
|
||||
testProfile.Copy(),
|
||||
&Options{
|
||||
OutputFormat: Dot,
|
||||
CallTree: true,
|
||||
Symbol: regexp.MustCompile(`.`),
|
||||
|
||||
SampleValue: sampleValue1,
|
||||
SampleUnit: testProfile.SampleType[1].Unit,
|
||||
},
|
||||
),
|
||||
want: path + "source.dot",
|
||||
},
|
||||
} {
|
||||
b := bytes.NewBuffer(nil)
|
||||
if err := Generate(b, tc.rpt, &binutils.Binutils{}); err != nil {
|
||||
t.Fatalf("%s: %v", tc.want, err)
|
||||
}
|
||||
|
||||
gold, err := ioutil.ReadFile(tc.want)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", tc.want, err)
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
gold = bytes.Replace(gold, []byte("testdata/"), []byte("testdata\\"), -1)
|
||||
}
|
||||
if string(b.String()) != string(gold) {
|
||||
d, err := proftest.Diff(gold, b.Bytes())
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", "source", err)
|
||||
}
|
||||
t.Error("source" + "\n" + string(d) + "\n" + "gold:\n" + tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var testM = []*profile.Mapping{
|
||||
{
|
||||
ID: 1,
|
||||
HasFunctions: true,
|
||||
HasFilenames: true,
|
||||
HasLineNumbers: true,
|
||||
HasInlineFrames: true,
|
||||
},
|
||||
}
|
||||
|
||||
var testF = []*profile.Function{
|
||||
{
|
||||
ID: 1,
|
||||
Name: "main",
|
||||
Filename: "testdata/source1",
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Name: "foo",
|
||||
Filename: "testdata/source1",
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Name: "bar",
|
||||
Filename: "testdata/source1",
|
||||
},
|
||||
{
|
||||
ID: 4,
|
||||
Name: "tee",
|
||||
Filename: "testdata/source2",
|
||||
},
|
||||
}
|
||||
|
||||
var testL = []*profile.Location{
|
||||
{
|
||||
ID: 1,
|
||||
Mapping: testM[0],
|
||||
Line: []profile.Line{
|
||||
{
|
||||
Function: testF[0],
|
||||
Line: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Mapping: testM[0],
|
||||
Line: []profile.Line{
|
||||
{
|
||||
Function: testF[1],
|
||||
Line: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Mapping: testM[0],
|
||||
Line: []profile.Line{
|
||||
{
|
||||
Function: testF[2],
|
||||
Line: 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 4,
|
||||
Mapping: testM[0],
|
||||
Line: []profile.Line{
|
||||
{
|
||||
Function: testF[3],
|
||||
Line: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: 5,
|
||||
Mapping: testM[0],
|
||||
Line: []profile.Line{
|
||||
{
|
||||
Function: testF[3],
|
||||
Line: 8,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var testProfile = &profile.Profile{
|
||||
PeriodType: &profile.ValueType{Type: "cpu", Unit: "millisecond"},
|
||||
Period: 10,
|
||||
DurationNanos: 10e9,
|
||||
SampleType: []*profile.ValueType{
|
||||
{Type: "samples", Unit: "count"},
|
||||
{Type: "cpu", Unit: "cycles"},
|
||||
},
|
||||
Sample: []*profile.Sample{
|
||||
{
|
||||
Location: []*profile.Location{testL[0]},
|
||||
Value: []int64{1, 1},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[2], testL[1], testL[0]},
|
||||
Value: []int64{1, 10},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[4], testL[2], testL[0]},
|
||||
Value: []int64{1, 100},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[3], testL[0]},
|
||||
Value: []int64{1, 1000},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[4], testL[3], testL[0]},
|
||||
Value: []int64{1, 10000},
|
||||
},
|
||||
},
|
||||
Location: testL,
|
||||
Function: testF,
|
||||
Mapping: testM,
|
||||
}
|
||||
|
||||
func TestDisambiguation(t *testing.T) {
|
||||
parent1 := &graph.Node{Info: graph.NodeInfo{Name: "parent1"}}
|
||||
parent2 := &graph.Node{Info: graph.NodeInfo{Name: "parent2"}}
|
||||
child1 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent1}
|
||||
child2 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent2}
|
||||
child3 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent1}
|
||||
sibling := &graph.Node{Info: graph.NodeInfo{Name: "sibling"}, Function: parent1}
|
||||
|
||||
n := []*graph.Node{parent1, parent2, child1, child2, child3, sibling}
|
||||
|
||||
wanted := map[*graph.Node]string{
|
||||
parent1: "parent1",
|
||||
parent2: "parent2",
|
||||
child1: "child [1/2]",
|
||||
child2: "child [2/2]",
|
||||
child3: "child [1/2]",
|
||||
sibling: "sibling",
|
||||
}
|
||||
|
||||
g := &graph.Graph{n}
|
||||
|
||||
names := getDisambiguatedNames(g)
|
||||
|
||||
for node, want := range wanted {
|
||||
if got := names[node]; got != want {
|
||||
t.Errorf("name %s, got %s, want %s", node.Info.Name, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFunctionMap(t *testing.T) {
|
||||
|
||||
fm := make(functionMap)
|
||||
nodes := []graph.NodeInfo{
|
||||
{Name: "fun1"},
|
||||
{Name: "fun2", File: "filename"},
|
||||
{Name: "fun1"},
|
||||
{Name: "fun2", File: "filename2"},
|
||||
}
|
||||
|
||||
want := []profile.Function{
|
||||
{ID: 1, Name: "fun1"},
|
||||
{ID: 2, Name: "fun2", Filename: "filename"},
|
||||
{ID: 1, Name: "fun1"},
|
||||
{ID: 3, Name: "fun2", Filename: "filename2"},
|
||||
}
|
||||
|
||||
for i, tc := range nodes {
|
||||
if got, want := fm.FindOrAdd(tc), want[i]; *got != want {
|
||||
t.Errorf("%d: want %v, got %v", i, want, got)
|
||||
}
|
||||
}
|
||||
}
|
494
src/cmd/vendor/github.com/google/pprof/internal/report/source.go
generated
vendored
Normal file
|
@@ -0,0 +1,494 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package report
|
||||
|
||||
// This file contains routines related to the generation of annotated
|
||||
// source listings.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/graph"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
)
|
||||
|
||||
// printSource prints an annotated source listing, including all
|
||||
// functions with samples that match the regexp rpt.options.symbol.
|
||||
// The sources are sorted by function name and then by filename to
|
||||
// eliminate potential nondeterminism.
|
||||
func printSource(w io.Writer, rpt *Report) error {
|
||||
o := rpt.options
|
||||
g := rpt.newGraph(nil)
|
||||
|
||||
// Identify all the functions that match the regexp provided.
|
||||
// Group nodes for each matching function.
|
||||
var functions graph.Nodes
|
||||
functionNodes := make(map[string]graph.Nodes)
|
||||
for _, n := range g.Nodes {
|
||||
if !o.Symbol.MatchString(n.Info.Name) {
|
||||
continue
|
||||
}
|
||||
if functionNodes[n.Info.Name] == nil {
|
||||
functions = append(functions, n)
|
||||
}
|
||||
functionNodes[n.Info.Name] = append(functionNodes[n.Info.Name], n)
|
||||
}
|
||||
functions.Sort(graph.NameOrder)
|
||||
|
||||
sourcePath := o.SourcePath
|
||||
if sourcePath == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not stat current dir: %v", err)
|
||||
}
|
||||
sourcePath = wd
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total))
|
||||
for _, fn := range functions {
|
||||
name := fn.Info.Name
|
||||
|
||||
// Identify all the source files associated to this function.
|
||||
// Group nodes for each source file.
|
||||
var sourceFiles graph.Nodes
|
||||
fileNodes := make(map[string]graph.Nodes)
|
||||
for _, n := range functionNodes[name] {
|
||||
if n.Info.File == "" {
|
||||
continue
|
||||
}
|
||||
if fileNodes[n.Info.File] == nil {
|
||||
sourceFiles = append(sourceFiles, n)
|
||||
}
|
||||
fileNodes[n.Info.File] = append(fileNodes[n.Info.File], n)
|
||||
}
|
||||
|
||||
if len(sourceFiles) == 0 {
|
||||
fmt.Fprintf(w, "No source information for %s\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
sourceFiles.Sort(graph.FileOrder)
|
||||
|
||||
// Print each file associated with this function.
|
||||
for _, fl := range sourceFiles {
|
||||
filename := fl.Info.File
|
||||
fns := fileNodes[filename]
|
||||
flatSum, cumSum := fns.Sum()
|
||||
|
||||
fnodes, _, err := getSourceFromFile(filename, sourcePath, fns, 0, 0)
|
||||
fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename)
|
||||
fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
|
||||
rpt.formatValue(flatSum), rpt.formatValue(cumSum),
|
||||
percentage(cumSum, rpt.total))
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(w, " Error: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, fn := range fnodes {
|
||||
fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), fn.Info.Lineno, fn.Info.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
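// A minimal illustrative sketch (not from the upstream pprof sources) of the
// text layout printSource emits, mirroring testdata/source.rpt below:
//
//	Total: 11111
//	ROUTINE ======================== bar in testdata/source1
//	        10        110 (flat, cum)  0.99% of Total
//	         .          .      9:source1 line 9;
//	        10        110     10:source1 line 10;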
|
||||
|
||||
// printWebSource prints an annotated source listing, including all
|
||||
// functions with samples that match the regexp rpt.options.symbol.
|
||||
func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
|
||||
o := rpt.options
|
||||
g := rpt.newGraph(nil)
|
||||
|
||||
// If the regexp source can be parsed as an address, also match
|
||||
// functions that land on that address.
|
||||
var address *uint64
|
||||
if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
|
||||
address = &hex
|
||||
}
|
||||
|
||||
sourcePath := o.SourcePath
|
||||
if sourcePath == "" {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not stat current dir: %v", err)
|
||||
}
|
||||
sourcePath = wd
|
||||
}
|
||||
|
||||
type fileFunction struct {
|
||||
fileName, functionName string
|
||||
}
|
||||
|
||||
// Extract interesting symbols from binary files in the profile and
|
||||
// classify samples per symbol.
|
||||
symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj)
|
||||
symNodes := nodesPerSymbol(g.Nodes, symbols)
|
||||
|
||||
// Identify sources associated to a symbol by examining
|
||||
// symbol samples. Classify samples per source file.
|
||||
fileNodes := make(map[fileFunction]graph.Nodes)
|
||||
if len(symNodes) == 0 {
|
||||
for _, n := range g.Nodes {
|
||||
if n.Info.File == "" || !o.Symbol.MatchString(n.Info.Name) {
|
||||
continue
|
||||
}
|
||||
ff := fileFunction{n.Info.File, n.Info.Name}
|
||||
fileNodes[ff] = append(fileNodes[ff], n)
|
||||
}
|
||||
} else {
|
||||
for _, nodes := range symNodes {
|
||||
for _, n := range nodes {
|
||||
if n.Info.File != "" {
|
||||
ff := fileFunction{n.Info.File, n.Info.Name}
|
||||
fileNodes[ff] = append(fileNodes[ff], n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(fileNodes) == 0 {
|
||||
return fmt.Errorf("No source information for %s\n", o.Symbol.String())
|
||||
}
|
||||
|
||||
sourceFiles := make(graph.Nodes, 0, len(fileNodes))
|
||||
for _, nodes := range fileNodes {
|
||||
sNode := *nodes[0]
|
||||
sNode.Flat, sNode.Cum = nodes.Sum()
|
||||
sourceFiles = append(sourceFiles, &sNode)
|
||||
}
|
||||
sourceFiles.Sort(graph.FileOrder)
|
||||
|
||||
// Print each file associated with this function.
|
||||
printHeader(w, rpt)
|
||||
for _, n := range sourceFiles {
|
||||
ff := fileFunction{n.Info.File, n.Info.Name}
|
||||
fns := fileNodes[ff]
|
||||
|
||||
asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj)
|
||||
start, end := sourceCoordinates(asm)
|
||||
|
||||
fnodes, path, err := getSourceFromFile(ff.fileName, sourcePath, fns, start, end)
|
||||
if err != nil {
|
||||
fnodes, path = getMissingFunctionSource(ff.fileName, asm, start, end)
|
||||
}
|
||||
|
||||
printFunctionHeader(w, ff.functionName, path, n.Flat, n.Cum, rpt)
|
||||
for _, fn := range fnodes {
|
||||
printFunctionSourceLine(w, fn, asm[fn.Info.Lineno], rpt)
|
||||
}
|
||||
printFunctionClosing(w)
|
||||
}
|
||||
printPageClosing(w)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sourceCoordinates returns the lowest and highest line numbers from
|
||||
// a set of assembly statements.
|
||||
func sourceCoordinates(asm map[int][]assemblyInstruction) (start, end int) {
|
||||
for l := range asm {
|
||||
if start == 0 || l < start {
|
||||
start = l
|
||||
}
|
||||
if end == 0 || l > end {
|
||||
end = l
|
||||
}
|
||||
}
|
||||
return start, end
|
||||
}
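// A minimal illustrative sketch (not from the upstream pprof sources):
// sourceCoordinates just tracks the smallest and largest line numbers that
// carry assembly, e.g.
//
//	asm := map[int][]assemblyInstruction{7: nil, 3: nil, 12: nil}
//	start, end := sourceCoordinates(asm) // start == 3, end == 12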
|
||||
|
||||
// assemblyPerSourceLine disassembles the binary containing a symbol
|
||||
// and classifies the assembly instructions according to their
|
||||
// corresponding source lines, annotating them with a set of samples.
|
||||
func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj plugin.ObjTool) map[int][]assemblyInstruction {
|
||||
assembly := make(map[int][]assemblyInstruction)
|
||||
// Identify symbol to use for this collection of samples.
|
||||
o := findMatchingSymbol(objSyms, rs)
|
||||
if o == nil {
|
||||
return assembly
|
||||
}
|
||||
|
||||
// Extract assembly for matched symbol
|
||||
insts, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End)
|
||||
if err != nil {
|
||||
return assembly
|
||||
}
|
||||
|
||||
srcBase := filepath.Base(src)
|
||||
anodes := annotateAssembly(insts, rs, o.base)
|
||||
var lineno = 0
|
||||
for _, an := range anodes {
|
||||
if filepath.Base(an.file) == srcBase {
|
||||
lineno = an.line
|
||||
}
|
||||
if lineno != 0 {
|
||||
assembly[lineno] = append(assembly[lineno], an)
|
||||
}
|
||||
}
|
||||
|
||||
return assembly
|
||||
}
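// A minimal illustrative sketch (not from the upstream pprof sources):
// each instruction is attributed to the most recently seen line of the
// requested source file, so for src == "foo.c" an annotated sequence
//
//	{file: "foo.c", line: 5}, {file: "inline.h", line: 20}, {file: "foo.c", line: 6}
//
// is grouped as assembly[5] = {1st, 2nd} and assembly[6] = {3rd}.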
|
||||
|
||||
// findMatchingSymbol looks for the symbol that corresponds to a set
|
||||
// of samples, by comparing their addresses.
|
||||
func findMatchingSymbol(objSyms []*objSymbol, ns graph.Nodes) *objSymbol {
|
||||
for _, n := range ns {
|
||||
for _, o := range objSyms {
|
||||
if filepath.Base(o.sym.File) == filepath.Base(n.Info.Objfile) &&
|
||||
o.sym.Start <= n.Info.Address-o.base &&
|
||||
n.Info.Address-o.base <= o.sym.End {
|
||||
return o
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// printHeader prints the page header for a weblist report.
|
||||
func printHeader(w io.Writer, rpt *Report) {
|
||||
fmt.Fprintln(w, weblistPageHeader)
|
||||
|
||||
var labels []string
|
||||
for _, l := range ProfileLabels(rpt) {
|
||||
labels = append(labels, template.HTMLEscapeString(l))
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, `<div class="legend">%s<br>Total: %s</div>`,
|
||||
strings.Join(labels, "<br>\n"),
|
||||
rpt.formatValue(rpt.total),
|
||||
)
|
||||
}
|
||||
|
||||
// printFunctionHeader prints a function header for a weblist report.
|
||||
func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, rpt *Report) {
|
||||
fmt.Fprintf(w, `<h1>%s</h1>%s
|
||||
<pre onClick="pprof_toggle_asm(event)">
|
||||
Total: %10s %10s (flat, cum) %s
|
||||
`,
|
||||
template.HTMLEscapeString(name), template.HTMLEscapeString(path),
|
||||
rpt.formatValue(flatSum), rpt.formatValue(cumSum),
|
||||
percentage(cumSum, rpt.total))
|
||||
}
|
||||
|
||||
// printFunctionSourceLine prints a source line and the corresponding assembly.
|
||||
func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyInstruction, rpt *Report) {
|
||||
if len(assembly) == 0 {
|
||||
fmt.Fprintf(w,
|
||||
"<span class=line> %6d</span> <span class=nop> %10s %10s %s </span>\n",
|
||||
fn.Info.Lineno,
|
||||
valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt),
|
||||
template.HTMLEscapeString(fn.Info.Name))
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w,
|
||||
"<span class=line> %6d</span> <span class=deadsrc> %10s %10s %s </span>",
|
||||
fn.Info.Lineno,
|
||||
valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt),
|
||||
template.HTMLEscapeString(fn.Info.Name))
|
||||
fmt.Fprint(w, "<span class=asm>")
|
||||
for _, an := range assembly {
|
||||
var fileline string
|
||||
class := "disasmloc"
|
||||
if an.file != "" {
|
||||
fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.file), an.line)
|
||||
if an.line != fn.Info.Lineno {
|
||||
class = "unimportant"
|
||||
}
|
||||
}
|
||||
flat, cum := an.flat, an.cum
|
||||
if an.flatDiv != 0 {
|
||||
flat = flat / an.flatDiv
|
||||
}
|
||||
if an.cumDiv != 0 {
|
||||
cum = cum / an.cumDiv
|
||||
}
|
||||
fmt.Fprintf(w, " %8s %10s %10s %8x: %-48s <span class=%s>%s</span>\n", "",
|
||||
valueOrDot(flat, rpt), valueOrDot(cum, rpt),
|
||||
an.address,
|
||||
template.HTMLEscapeString(an.instruction),
|
||||
class,
|
||||
template.HTMLEscapeString(fileline))
|
||||
}
|
||||
fmt.Fprintln(w, "</span>")
|
||||
}
|
||||
|
||||
// printFunctionClosing prints the end of a function in a weblist report.
|
||||
func printFunctionClosing(w io.Writer) {
|
||||
fmt.Fprintln(w, "</pre>")
|
||||
}
|
||||
|
||||
// printPageClosing prints the end of the page in a weblist report.
|
||||
func printPageClosing(w io.Writer) {
|
||||
fmt.Fprintln(w, weblistPageClosing)
|
||||
}
|
||||
|
||||
// getSourceFromFile collects the sources of a function from a source
|
||||
// file and annotates it with the samples in fns. Returns the sources
|
||||
// as nodes, using the info.name field to hold the source code.
|
||||
func getSourceFromFile(file, sourcePath string, fns graph.Nodes, start, end int) (graph.Nodes, string, error) {
|
||||
file = trimPath(file)
|
||||
f, err := openSourceFile(file, sourcePath)
|
||||
if err != nil {
|
||||
return nil, file, err
|
||||
}
|
||||
|
||||
lineNodes := make(map[int]graph.Nodes)
|
||||
// Collect source coordinates from profile.
|
||||
const margin = 5 // Lines before first/after last sample.
|
||||
if start == 0 {
|
||||
if fns[0].Info.StartLine != 0 {
|
||||
start = fns[0].Info.StartLine
|
||||
} else {
|
||||
start = fns[0].Info.Lineno - margin
|
||||
}
|
||||
} else {
|
||||
start -= margin
|
||||
}
|
||||
if end == 0 {
|
||||
end = fns[0].Info.Lineno
|
||||
}
|
||||
end += margin
|
||||
for _, n := range fns {
|
||||
lineno := n.Info.Lineno
|
||||
nodeStart := n.Info.StartLine
|
||||
if nodeStart == 0 {
|
||||
nodeStart = lineno - margin
|
||||
}
|
||||
nodeEnd := lineno + margin
|
||||
if nodeStart < start {
|
||||
start = nodeStart
|
||||
} else if nodeEnd > end {
|
||||
end = nodeEnd
|
||||
}
|
||||
lineNodes[lineno] = append(lineNodes[lineno], n)
|
||||
}
|
||||
|
||||
var src graph.Nodes
|
||||
buf := bufio.NewReader(f)
|
||||
lineno := 1
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, file, err
|
||||
}
|
||||
if line == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
if lineno >= start {
|
||||
flat, cum := lineNodes[lineno].Sum()
|
||||
|
||||
src = append(src, &graph.Node{
|
||||
Info: graph.NodeInfo{
|
||||
Name: strings.TrimRight(line, "\n"),
|
||||
Lineno: lineno,
|
||||
},
|
||||
Flat: flat,
|
||||
Cum: cum,
|
||||
})
|
||||
}
|
||||
lineno++
|
||||
if lineno > end {
|
||||
break
|
||||
}
|
||||
}
|
||||
return src, file, nil
|
||||
}
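// A minimal illustrative sketch (not from the upstream pprof sources):
// with the default margin of 5, a function whose only sample maps to line 20
// and whose StartLine is unknown is listed from roughly line 15 to line 25:
//
//	fnodes, path, err := getSourceFromFile("foo.go", ".", fns, 0, 0)
//	// each returned node holds one source line in Info.Name, annotated
//	// with the flat/cum sums of the profile nodes on that line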
|
||||
|
||||
// getMissingFunctionSource creates a dummy function body to point to
|
||||
// the source file and annotates it with the samples in asm.
|
||||
func getMissingFunctionSource(filename string, asm map[int][]assemblyInstruction, start, end int) (graph.Nodes, string) {
|
||||
var fnodes graph.Nodes
|
||||
for i := start; i <= end; i++ {
|
||||
insts := asm[i]
|
||||
if len(insts) == 0 {
|
||||
continue
|
||||
}
|
||||
var group assemblyInstruction
|
||||
for _, insn := range insts {
|
||||
group.flat += insn.flat
|
||||
group.cum += insn.cum
|
||||
group.flatDiv += insn.flatDiv
|
||||
group.cumDiv += insn.cumDiv
|
||||
}
|
||||
flat := group.flatValue()
|
||||
cum := group.cumValue()
|
||||
fnodes = append(fnodes, &graph.Node{
|
||||
Info: graph.NodeInfo{
|
||||
Name: "???",
|
||||
Lineno: i,
|
||||
},
|
||||
Flat: flat,
|
||||
Cum: cum,
|
||||
})
|
||||
}
|
||||
return fnodes, filename
|
||||
}
|
||||
|
||||
// openSourceFile opens a source file from a name encoded in a
|
||||
// profile. File names in a profile are often relative paths, so
|
||||
// search them in each of the paths in searchPath (or CWD by default),
|
||||
// and their parents.
|
||||
func openSourceFile(path, searchPath string) (*os.File, error) {
|
||||
if filepath.IsAbs(path) {
|
||||
f, err := os.Open(path)
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Scan each component of the path
|
||||
for _, dir := range strings.Split(searchPath, ":") {
|
||||
// Search up for every parent of each possible path.
|
||||
for {
|
||||
filename := filepath.Join(dir, path)
|
||||
if f, err := os.Open(filename); err == nil {
|
||||
return f, nil
|
||||
}
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
break
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Could not find file %s on path %s", path, searchPath)
|
||||
}
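// A minimal illustrative sketch (not from the upstream pprof sources) of the
// lookup order for a relative path and a two-entry search path:
//
//	f, err := openSourceFile("pkg/foo.go", "/a/b:/c")
//	// tries /a/b/pkg/foo.go, /a/pkg/foo.go, /pkg/foo.go,
//	// then /c/pkg/foo.go and /pkg/foo.go again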
|
||||
|
||||
// trimPath cleans up a path by removing prefixes that are commonly
|
||||
// found on profiles.
|
||||
func trimPath(path string) string {
|
||||
basePaths := []string{
|
||||
"/proc/self/cwd/./",
|
||||
"/proc/self/cwd/",
|
||||
}
|
||||
|
||||
sPath := filepath.ToSlash(path)
|
||||
|
||||
for _, base := range basePaths {
|
||||
if strings.HasPrefix(sPath, base) {
|
||||
return filepath.FromSlash(sPath[len(base):])
|
||||
}
|
||||
}
|
||||
return path
|
||||
}
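// A minimal illustrative sketch (not from the upstream pprof sources):
//
//	trimPath("/proc/self/cwd/./src/main.go") // "src/main.go"
//	trimPath("/usr/lib/libc.so")             // returned unchanged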
|
|
@@ -1,6 +1,16 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package report
|
||||
|
17
src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.dot
generated
vendored
Normal file
|
@@ -0,0 +1,17 @@
|
|||
digraph "unnamed" {
|
||||
node [style=filled fillcolor="#f8f8f8"]
|
||||
subgraph cluster_L { "Duration: 10s, Total samples = 11111 " [shape=box fontsize=16 label="Duration: 10s, Total samples = 11111 \lShowing nodes accounting for 11111, 100% of 11111 total\l"] }
|
||||
N1 [label="tee\nsource2:8\n10000 (90.00%)" fontsize=24 shape=box tooltip="tee testdata/source2:8 (10000)" color="#b20500" fillcolor="#edd6d5"]
|
||||
N2 [label="main\nsource1:2\n1 (0.009%)\nof 11111 (100%)" fontsize=9 shape=box tooltip="main testdata/source1:2 (11111)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N3 [label="tee\nsource2:2\n1000 (9.00%)\nof 11000 (99.00%)" fontsize=14 shape=box tooltip="tee testdata/source2:2 (11000)" color="#b20000" fillcolor="#edd5d5"]
|
||||
N4 [label="tee\nsource2:8\n100 (0.9%)" fontsize=10 shape=box tooltip="tee testdata/source2:8 (100)" color="#b2b0aa" fillcolor="#edecec"]
|
||||
N5 [label="bar\nsource1:10\n10 (0.09%)" fontsize=9 shape=box tooltip="bar testdata/source1:10 (10)" color="#b2b2b1" fillcolor="#ededed"]
|
||||
N6 [label="bar\nsource1:10\n0 of 100 (0.9%)" fontsize=8 shape=box tooltip="bar testdata/source1:10 (100)" color="#b2b0aa" fillcolor="#edecec"]
|
||||
N7 [label="foo\nsource1:4\n0 of 10 (0.09%)" fontsize=8 shape=box tooltip="foo testdata/source1:4 (10)" color="#b2b2b1" fillcolor="#ededed"]
|
||||
N2 -> N3 [label=" 11000" weight=100 penwidth=5 color="#b20000" tooltip="main testdata/source1:2 -> tee testdata/source2:2 (11000)" labeltooltip="main testdata/source1:2 -> tee testdata/source2:2 (11000)"]
|
||||
N3 -> N1 [label=" 10000" weight=91 penwidth=5 color="#b20500" tooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)" labeltooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)"]
|
||||
N6 -> N4 [label=" 100" color="#b2b0aa" tooltip="bar testdata/source1:10 -> tee testdata/source2:8 (100)" labeltooltip="bar testdata/source1:10 -> tee testdata/source2:8 (100)"]
|
||||
N2 -> N6 [label=" 100" color="#b2b0aa" tooltip="main testdata/source1:2 -> bar testdata/source1:10 (100)" labeltooltip="main testdata/source1:2 -> bar testdata/source1:10 (100)"]
|
||||
N7 -> N5 [label=" 10" color="#b2b2b1" tooltip="foo testdata/source1:4 -> bar testdata/source1:10 (10)" labeltooltip="foo testdata/source1:4 -> bar testdata/source1:10 (10)"]
|
||||
N2 -> N7 [label=" 10" color="#b2b2b1" tooltip="main testdata/source1:2 -> foo testdata/source1:4 (10)" labeltooltip="main testdata/source1:2 -> foo testdata/source1:4 (10)"]
|
||||
}
|
49
src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.rpt
generated
vendored
Normal file
|
@@ -0,0 +1,49 @@
|
|||
Total: 11111
|
||||
ROUTINE ======================== bar in testdata/source1
|
||||
10 110 (flat, cum) 0.99% of Total
|
||||
. . 5:source1 line 5;
|
||||
. . 6:source1 line 6;
|
||||
. . 7:source1 line 7;
|
||||
. . 8:source1 line 8;
|
||||
. . 9:source1 line 9;
|
||||
10 110 10:source1 line 10;
|
||||
. . 11:source1 line 11;
|
||||
. . 12:source1 line 12;
|
||||
. . 13:source1 line 13;
|
||||
. . 14:source1 line 14;
|
||||
. . 15:source1 line 15;
|
||||
ROUTINE ======================== foo in testdata/source1
|
||||
0 10 (flat, cum) 0.09% of Total
|
||||
. . 1:source1 line 1;
|
||||
. . 2:source1 line 2;
|
||||
. . 3:source1 line 3;
|
||||
. 10 4:source1 line 4;
|
||||
. . 5:source1 line 5;
|
||||
. . 6:source1 line 6;
|
||||
. . 7:source1 line 7;
|
||||
. . 8:source1 line 8;
|
||||
. . 9:source1 line 9;
|
||||
ROUTINE ======================== main in testdata/source1
|
||||
1 11111 (flat, cum) 100% of Total
|
||||
. . 1:source1 line 1;
|
||||
1 11111 2:source1 line 2;
|
||||
. . 3:source1 line 3;
|
||||
. . 4:source1 line 4;
|
||||
. . 5:source1 line 5;
|
||||
. . 6:source1 line 6;
|
||||
. . 7:source1 line 7;
|
||||
ROUTINE ======================== tee in testdata/source2
|
||||
11100 21100 (flat, cum) 189.90% of Total
|
||||
. . 1:source2 line 1;
|
||||
1000 11000 2:source2 line 2;
|
||||
. . 3:source2 line 3;
|
||||
. . 4:source2 line 4;
|
||||
. . 5:source2 line 5;
|
||||
. . 6:source2 line 6;
|
||||
. . 7:source2 line 7;
|
||||
10100 10100 8:source2 line 8;
|
||||
. . 9:source2 line 9;
|
||||
. . 10:source2 line 10;
|
||||
. . 11:source2 line 11;
|
||||
. . 12:source2 line 12;
|
||||
. . 13:source2 line 13;
|
19
src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source1
generated
vendored
Normal file
|
@@ -0,0 +1,19 @@
|
|||
source1 line 1;
|
||||
source1 line 2;
|
||||
source1 line 3;
|
||||
source1 line 4;
|
||||
source1 line 5;
|
||||
source1 line 6;
|
||||
source1 line 7;
|
||||
source1 line 8;
|
||||
source1 line 9;
|
||||
source1 line 10;
|
||||
source1 line 11;
|
||||
source1 line 12;
|
||||
source1 line 13;
|
||||
source1 line 14;
|
||||
source1 line 15;
|
||||
source1 line 16;
|
||||
source1 line 17;
|
||||
source1 line 18;
|
||||
|
19
src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source2
generated
vendored
Normal file
|
@@ -0,0 +1,19 @@
|
|||
source2 line 1;
|
||||
source2 line 2;
|
||||
source2 line 3;
|
||||
source2 line 4;
|
||||
source2 line 5;
|
||||
source2 line 6;
|
||||
source2 line 7;
|
||||
source2 line 8;
|
||||
source2 line 9;
|
||||
source2 line 10;
|
||||
source2 line 11;
|
||||
source2 line 12;
|
||||
source2 line 13;
|
||||
source2 line 14;
|
||||
source2 line 15;
|
||||
source2 line 16;
|
||||
source2 line 17;
|
||||
source2 line 18;
|
||||
|
356
src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
generated
vendored
Normal file
|
@@ -0,0 +1,356 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package symbolizer provides a routine to populate a profile with
|
||||
// symbol, file and line number information. It relies on the
|
||||
// addr2liner and demangle packages to do the actual work.
|
||||
package symbolizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/google/pprof/internal/binutils"
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/symbolz"
|
||||
"github.com/google/pprof/profile"
|
||||
"github.com/ianlancetaylor/demangle"
|
||||
)
|
||||
|
||||
// Symbolizer implements the plugin.Symbolize interface.
|
||||
type Symbolizer struct {
|
||||
Obj plugin.ObjTool
|
||||
UI plugin.UI
|
||||
}
|
||||
|
||||
// test taps for dependency injection
|
||||
var symbolzSymbolize = symbolz.Symbolize
|
||||
var localSymbolize = doLocalSymbolize
|
||||
|
||||
// Symbolize attempts to symbolize profile p. First uses binutils on
|
||||
// local binaries; if the source is a URL it attempts to get any
|
||||
// missed entries using symbolz.
|
||||
func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *profile.Profile) error {
|
||||
remote, local, force, demanglerMode := true, true, false, ""
|
||||
for _, o := range strings.Split(strings.ToLower(mode), ":") {
|
||||
switch o {
|
||||
case "none", "no":
|
||||
return nil
|
||||
case "local", "fastlocal":
|
||||
remote, local = false, true
|
||||
case "remote":
|
||||
remote, local = true, false
|
||||
case "", "force":
|
||||
force = true
|
||||
default:
|
||||
switch d := strings.TrimPrefix(o, "demangle="); d {
|
||||
case "full", "none", "templates":
|
||||
demanglerMode = d
|
||||
force = true
|
||||
continue
|
||||
case "default":
|
||||
continue
|
||||
}
|
||||
s.UI.PrintErr("ignoring unrecognized symbolization option: " + mode)
|
||||
s.UI.PrintErr("expecting -symbolize=[local|fastlocal|remote|none][:force][:demangle=[none|full|templates|default]")
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
if local {
|
||||
// Symbolize locally using binutils.
|
||||
if err = localSymbolize(mode, p, s.Obj, s.UI); err != nil {
|
||||
s.UI.PrintErr("local symbolization: " + err.Error())
|
||||
}
|
||||
}
|
||||
if remote {
|
||||
if err = symbolzSymbolize(sources, postURL, p, s.UI); err != nil {
|
||||
return err // Ran out of options.
|
||||
}
|
||||
}
|
||||
|
||||
Demangle(p, force, demanglerMode)
|
||||
return nil
|
||||
}
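// A minimal illustrative sketch (not from the upstream pprof sources): a
// caller supplies its own ObjTool and UI (obj and ui here are placeholders)
// and selects behavior through the mode string, e.g. local-only
// symbolization with full demangling:
//
//	s := &Symbolizer{Obj: obj, UI: ui}
//	err := s.Symbolize("local:demangle=full", sources, prof)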
|
||||
|
||||
// postURL issues a POST to a URL over HTTP.
|
||||
func postURL(source, post string) ([]byte, error) {
|
||||
resp, err := http.Post(source, "application/octet-stream", strings.NewReader(post))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http post %s: %v", source, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, statusCodeError(resp)
|
||||
}
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
|
||||
func statusCodeError(resp *http.Response) error {
|
||||
if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") {
|
||||
// error is from pprof endpoint
|
||||
if body, err := ioutil.ReadAll(resp.Body); err == nil {
|
||||
return fmt.Errorf("server response: %s - %s", resp.Status, body)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("server response: %s", resp.Status)
|
||||
}
|
||||
|
||||
// doLocalSymbolize adds symbol and line number information to all locations
|
||||
// in a profile. mode enables some options to control
|
||||
// symbolization.
|
||||
func doLocalSymbolize(mode string, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
|
||||
force := false
|
||||
// Disable some mechanisms based on mode string.
|
||||
for _, o := range strings.Split(strings.ToLower(mode), ":") {
|
||||
switch {
|
||||
case o == "force":
|
||||
force = true
|
||||
case o == "fastlocal":
|
||||
if bu, ok := obj.(*binutils.Binutils); ok {
|
||||
bu.SetFastSymbolization(true)
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
mt, err := newMapping(prof, obj, ui, force)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer mt.close()
|
||||
|
||||
functions := make(map[profile.Function]*profile.Function)
|
||||
for _, l := range mt.prof.Location {
|
||||
m := l.Mapping
|
||||
segment := mt.segments[m]
|
||||
if segment == nil {
|
||||
// Nothing to do.
|
||||
continue
|
||||
}
|
||||
|
||||
stack, err := segment.SourceLine(l.Address)
|
||||
if err != nil || len(stack) == 0 {
|
||||
// No answers from addr2line.
|
||||
continue
|
||||
}
|
||||
|
||||
l.Line = make([]profile.Line, len(stack))
|
||||
for i, frame := range stack {
|
||||
if frame.Func != "" {
|
||||
m.HasFunctions = true
|
||||
}
|
||||
if frame.File != "" {
|
||||
m.HasFilenames = true
|
||||
}
|
||||
if frame.Line != 0 {
|
||||
m.HasLineNumbers = true
|
||||
}
|
||||
f := &profile.Function{
|
||||
Name: frame.Func,
|
||||
SystemName: frame.Func,
|
||||
Filename: frame.File,
|
||||
}
|
||||
if fp := functions[*f]; fp != nil {
|
||||
f = fp
|
||||
} else {
|
||||
functions[*f] = f
|
||||
f.ID = uint64(len(mt.prof.Function)) + 1
|
||||
mt.prof.Function = append(mt.prof.Function, f)
|
||||
}
|
||||
l.Line[i] = profile.Line{
|
||||
Function: f,
|
||||
Line: int64(frame.Line),
|
||||
}
|
||||
}
|
||||
|
||||
if len(stack) > 0 {
|
||||
m.HasInlineFrames = true
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Demangle updates the function names in a profile with demangled C++
|
||||
// names, simplified according to demanglerMode. If force is set,
|
||||
// overwrite any names that appear already demangled.
|
||||
func Demangle(prof *profile.Profile, force bool, demanglerMode string) {
|
||||
if force {
|
||||
// Remove the current demangled names to force demangling
|
||||
for _, f := range prof.Function {
|
||||
if f.Name != "" && f.SystemName != "" {
|
||||
f.Name = f.SystemName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var options []demangle.Option
|
||||
switch demanglerMode {
|
||||
case "": // demangled, simplified: no parameters, no templates, no return type
|
||||
options = []demangle.Option{demangle.NoParams, demangle.NoTemplateParams}
|
||||
case "templates": // demangled, simplified: no parameters, no return type
|
||||
options = []demangle.Option{demangle.NoParams}
|
||||
case "full":
|
||||
options = []demangle.Option{demangle.NoClones}
|
||||
case "none": // no demangling
|
||||
return
|
||||
}
|
||||
|
||||
// Copy the options because they may be updated by the call.
|
||||
o := make([]demangle.Option, len(options))
|
||||
for _, fn := range prof.Function {
|
||||
if fn.Name != "" && fn.SystemName != fn.Name {
|
||||
continue // Already demangled.
|
||||
}
|
||||
copy(o, options)
|
||||
if demangled := demangle.Filter(fn.SystemName, o...); demangled != fn.SystemName {
|
||||
fn.Name = demangled
|
||||
continue
|
||||
}
|
||||
// Could not demangle. Apply heuristics in case the name is
|
||||
// already demangled.
|
||||
name := fn.SystemName
|
||||
if looksLikeDemangledCPlusPlus(name) {
|
||||
if demanglerMode == "" || demanglerMode == "templates" {
|
||||
name = removeMatching(name, '(', ')')
|
||||
}
|
||||
if demanglerMode == "" {
|
||||
name = removeMatching(name, '<', '>')
|
||||
}
|
||||
}
|
||||
fn.Name = name
|
||||
}
|
||||
}
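// A minimal illustrative sketch (not from the upstream pprof sources):
//
//	Demangle(prof, false, "templates")
//	// keeps template arguments but strips parameter lists from C++ names,
//	// leaving already-demangled functions untouched unless force is set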
|
||||
|
||||
// looksLikeDemangledCPlusPlus is a heuristic to decide if a name is
|
||||
// the result of demangling C++. If so, further heuristics will be
|
||||
// applied to simplify the name.
|
||||
func looksLikeDemangledCPlusPlus(demangled string) bool {
|
||||
if strings.Contains(demangled, ".<") { // Skip java names of the form "class.<init>"
|
||||
return false
|
||||
}
|
||||
return strings.ContainsAny(demangled, "<>[]") || strings.Contains(demangled, "::")
|
||||
}
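// A minimal illustrative sketch (not from the upstream pprof sources):
//
//	looksLikeDemangledCPlusPlus("std::vector<int>::size") // true
//	looksLikeDemangledCPlusPlus("MyClass.<init>")         // false (Java-style name)
//	looksLikeDemangledCPlusPlus("plain_c_symbol")         // false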
|
||||
|
||||
// removeMatching removes nested instances of start..end from name.
|
||||
func removeMatching(name string, start, end byte) string {
|
||||
s := string(start) + string(end)
|
||||
var nesting, first, current int
|
||||
for index := strings.IndexAny(name[current:], s); index != -1; index = strings.IndexAny(name[current:], s) {
|
||||
switch current += index; name[current] {
|
||||
case start:
|
||||
nesting++
|
||||
if nesting == 1 {
|
||||
first = current
|
||||
}
|
||||
case end:
|
||||
nesting--
|
||||
switch {
|
||||
case nesting < 0:
|
||||
return name // Mismatch, abort
|
||||
case nesting == 0:
|
||||
name = name[:first] + name[current+1:]
|
||||
current = first - 1
|
||||
}
|
||||
}
|
||||
current++
|
||||
}
|
||||
return name
|
||||
}
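// A minimal illustrative sketch (not from the upstream pprof sources):
//
//	removeMatching("std::map<int, long>::insert(pair)", '<', '>') // "std::map::insert(pair)"
//	removeMatching("foo(int, bar(x)) const", '(', ')')            // "foo const"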
|
||||
|
||||
// newMapping creates a mappingTable for a profile.
|
||||
func newMapping(prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI, force bool) (*mappingTable, error) {
|
||||
mt := &mappingTable{
|
||||
prof: prof,
|
||||
segments: make(map[*profile.Mapping]plugin.ObjFile),
|
||||
}
|
||||
|
||||
// Identify used mappings
|
||||
mappings := make(map[*profile.Mapping]bool)
|
||||
for _, l := range prof.Location {
|
||||
mappings[l.Mapping] = true
|
||||
}
|
||||
|
||||
missingBinaries := false
|
||||
for midx, m := range prof.Mapping {
|
||||
if !mappings[m] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Do not attempt to re-symbolize a mapping that has already been symbolized.
|
||||
if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) {
|
||||
continue
|
||||
}
|
||||
|
||||
if m.File == "" {
|
||||
if midx == 0 {
|
||||
ui.PrintErr("Main binary filename not available.")
|
||||
continue
|
||||
}
|
||||
missingBinaries = true
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip well-known system mappings
|
||||
if m.Unsymbolizable() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip mappings pointing to a source URL
|
||||
if m.BuildID == "" {
|
||||
if u, err := url.Parse(m.File); err == nil && u.IsAbs() && strings.Contains(strings.ToLower(u.Scheme), "http") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
name := filepath.Base(m.File)
|
||||
f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset)
|
||||
if err != nil {
|
||||
ui.PrintErr("Local symbolization failed for ", name, ": ", err)
|
||||
missingBinaries = true
|
||||
continue
|
||||
}
|
||||
if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID {
|
||||
ui.PrintErr("Local symbolization failed for ", name, ": build ID mismatch")
|
||||
f.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
mt.segments[m] = f
|
||||
}
|
||||
if missingBinaries {
|
||||
ui.PrintErr("Some binary filenames not available. Symbolization may be incomplete.\n" +
|
||||
"Try setting PPROF_BINARY_PATH to the search path for local binaries.")
|
||||
}
|
||||
return mt, nil
|
||||
}
|
||||
|
||||
// mappingTable contains the mechanisms for symbolization of a
|
||||
// profile.
|
||||
type mappingTable struct {
|
||||
prof *profile.Profile
|
||||
segments map[*profile.Mapping]plugin.ObjFile
|
||||
}
|
||||
|
||||
// close releases any external processes being used for the mapping.
|
||||
func (mt *mappingTable) close() {
|
||||
for _, segment := range mt.segments {
|
||||
segment.Close()
|
||||
}
|
||||
}
|
253
src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer_test.go
generated
vendored
Normal file
|
@@ -0,0 +1,253 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package symbolizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/pprof/internal/plugin"
|
||||
"github.com/google/pprof/internal/proftest"
|
||||
"github.com/google/pprof/profile"
|
||||
)
|
||||
|
||||
var testM = []*profile.Mapping{
|
||||
{
|
||||
ID: 1,
|
||||
Start: 0x1000,
|
||||
Limit: 0x5000,
|
||||
File: "mapping",
|
||||
},
|
||||
}
|
||||
|
||||
var testL = []*profile.Location{
|
||||
{
|
||||
ID: 1,
|
||||
Mapping: testM[0],
|
||||
Address: 1000,
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Mapping: testM[0],
|
||||
Address: 2000,
|
||||
},
|
||||
{
|
||||
ID: 3,
|
||||
Mapping: testM[0],
|
||||
Address: 3000,
|
||||
},
|
||||
{
|
||||
ID: 4,
|
||||
Mapping: testM[0],
|
||||
Address: 4000,
|
||||
},
|
||||
{
|
||||
ID: 5,
|
||||
Mapping: testM[0],
|
||||
Address: 5000,
|
||||
},
|
||||
}
|
||||
|
||||
var testProfile = profile.Profile{
|
||||
DurationNanos: 10e9,
|
||||
SampleType: []*profile.ValueType{
|
||||
{Type: "cpu", Unit: "cycles"},
|
||||
},
|
||||
Sample: []*profile.Sample{
|
||||
{
|
||||
Location: []*profile.Location{testL[0]},
|
||||
Value: []int64{1},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[1], testL[0]},
|
||||
Value: []int64{10},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[2], testL[0]},
|
||||
Value: []int64{100},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[3], testL[0]},
|
||||
Value: []int64{1},
|
||||
},
|
||||
{
|
||||
Location: []*profile.Location{testL[4], testL[3], testL[0]},
|
||||
Value: []int64{10000},
|
||||
},
|
||||
},
|
||||
Location: testL,
|
||||
Mapping: testM,
|
||||
PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"},
|
||||
Period: 10,
|
||||
}
|
||||
|
||||
func TestSymbolization(t *testing.T) {
|
||||
sSym := symbolzSymbolize
|
||||
lSym := localSymbolize
|
||||
defer func() {
|
||||
symbolzSymbolize = sSym
|
||||
localSymbolize = lSym
|
||||
}()
|
||||
symbolzSymbolize = symbolzMock
|
||||
localSymbolize = localMock
|
||||
|
||||
type testcase struct {
|
||||
mode string
|
||||
wantComment string
|
||||
}
|
||||
|
||||
s := Symbolizer{
|
||||
mockObjTool{},
|
||||
&proftest.TestUI{T: t},
|
||||
}
|
||||
for i, tc := range []testcase{
|
||||
{
|
||||
"local",
|
||||
"local=local",
|
||||
},
|
||||
{
|
||||
"fastlocal",
|
||||
"local=fastlocal",
|
||||
},
|
||||
{
|
||||
"remote",
|
||||
"symbolz",
|
||||
},
|
||||
{
|
||||
"",
|
||||
"local=:symbolz",
|
||||
},
|
||||
} {
|
||||
prof := testProfile.Copy()
|
||||
if err := s.Symbolize(tc.mode, nil, prof); err != nil {
|
||||
t.Errorf("symbolize #%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if got, want := strings.Join(prof.Comments, ":"), tc.wantComment; got != want {
|
||||
t.Errorf("got %s, want %s", got, want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func symbolzMock(sources plugin.MappingSources, syms func(string, string) ([]byte, error), p *profile.Profile, ui plugin.UI) error {
|
||||
p.Comments = append(p.Comments, "symbolz")
|
||||
return nil
|
||||
}
|
||||
|
||||
func localMock(mode string, p *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error {
|
||||
p.Comments = append(p.Comments, "local="+mode)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestLocalSymbolization(t *testing.T) {
|
||||
prof := testProfile.Copy()
|
||||
|
||||
if prof.HasFunctions() {
|
||||
t.Error("unexpected function names")
|
||||
}
|
||||
if prof.HasFileLines() {
|
||||
t.Error("unexpected filenames or line numbers")
|
||||
}
|
||||
|
||||
b := mockObjTool{}
|
||||
if err := localSymbolize("", prof, b, &proftest.TestUI{T: t}); err != nil {
|
||||
t.Fatalf("localSymbolize(): %v", err)
|
||||
}
|
||||
|
||||
for _, loc := range prof.Location {
|
||||
if err := checkSymbolizedLocation(loc.Address, loc.Line); err != nil {
|
||||
t.Errorf("location %d: %v", loc.Address, err)
|
||||
}
|
||||
}
|
||||
if !prof.HasFunctions() {
|
||||
t.Error("missing function names")
|
||||
}
|
||||
if !prof.HasFileLines() {
|
||||
t.Error("missing filenames or line numbers")
|
||||
}
|
||||
}
|
||||
|
||||
func checkSymbolizedLocation(a uint64, got []profile.Line) error {
|
||||
want, ok := mockAddresses[a]
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected address")
|
||||
}
|
||||
if len(want) != len(got) {
|
||||
return fmt.Errorf("want len %d, got %d", len(want), len(got))
|
||||
}
|
||||
|
||||
for i, w := range want {
|
||||
g := got[i]
|
||||
if g.Function.Name != w.Func {
|
||||
return fmt.Errorf("want function: %q, got %q", w.Func, g.Function.Name)
|
||||
}
|
||||
if g.Function.Filename != w.File {
|
||||
return fmt.Errorf("want filename: %q, got %q", w.File, g.Function.Filename)
|
||||
}
|
||||
if g.Line != int64(w.Line) {
|
||||
return fmt.Errorf("want lineno: %d, got %d", w.Line, g.Line)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var mockAddresses = map[uint64][]plugin.Frame{
|
||||
1000: []plugin.Frame{{"fun11", "file11.src", 10}},
|
||||
2000: []plugin.Frame{{"fun21", "file21.src", 20}, {"fun22", "file22.src", 20}},
|
||||
3000: []plugin.Frame{{"fun31", "file31.src", 30}, {"fun32", "file32.src", 30}, {"fun33", "file33.src", 30}},
|
||||
4000: []plugin.Frame{{"fun41", "file41.src", 40}, {"fun42", "file42.src", 40}, {"fun43", "file43.src", 40}, {"fun44", "file44.src", 40}},
|
||||
5000: []plugin.Frame{{"fun51", "file51.src", 50}, {"fun52", "file52.src", 50}, {"fun53", "file53.src", 50}, {"fun54", "file54.src", 50}, {"fun55", "file55.src", 50}},
|
||||
}
|
||||
|
||||
type mockObjTool struct{}
|
||||
|
||||
func (mockObjTool) Open(file string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
return mockObjFile{frames: mockAddresses}, nil
|
||||
}
|
||||
|
||||
func (mockObjTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
|
||||
return nil, fmt.Errorf("disassembly not supported")
|
||||
}
|
||||
|
||||
type mockObjFile struct {
|
||||
frames map[uint64][]plugin.Frame
|
||||
}
|
||||
|
||||
func (mockObjFile) Name() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (mockObjFile) Base() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (mockObjFile) BuildID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (mf mockObjFile) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
return mf.frames[addr], nil
|
||||
}
|
||||
|
||||
func (mockObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
||||
return []*plugin.Sym{}, nil
|
||||
}
|
||||
|
||||
func (mockObjFile) Close() error {
|
||||
return nil
|
||||
}
|