aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/vendor/github.com
diff options
context:
space:
mode:
authorEmmanuel T Odeke <emmanuel@orijtech.com>2021-02-26 02:27:24 -0800
committerEmmanuel Odeke <emmanuel@orijtech.com>2021-03-15 19:02:39 +0000
commit2d4042d4ab3a2021819dce91eb228daf8fa5e557 (patch)
tree0a0b71f4bc2859a19320760f54be55f891b5e07a /src/cmd/vendor/github.com
parenta8d9fb2fcd1fc11b41651e0ea608b3a3e90755b7 (diff)
downloadgo-2d4042d4ab3a2021819dce91eb228daf8fa5e557.tar.xz
all: update golang.org/x/* dependencies
Updates src/ and src/cmd/* dependencies, using go mod vendor as well as updatestd -branch=master -goroot=$GOROOT This change was run in anticipation of bringing in x/net/http2 CL 237957. For #32112. For #36905. Change-Id: If8cefc348463b6d82d85020b57db411213720ef8 Reviewed-on: https://go-review.googlesource.com/c/go/+/296789 Trust: Emmanuel Odeke <emmanuel@orijtech.com> Trust: Dmitri Shuralyov <dmitshur@golang.org> Trust: Bryan C. Mills <bcmills@google.com> Run-TryBot: Emmanuel Odeke <emmanuel@orijtech.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Alexander Rakoczy <alex@golang.org>
Diffstat (limited to 'src/cmd/vendor/github.com')
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go24
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go36
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go58
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go65
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go3
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go6
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go1
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go81
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go4
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go240
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/report/source.go723
-rw-r--r--src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go3
-rw-r--r--src/cmd/vendor/github.com/google/pprof/profile/merge.go5
-rw-r--r--src/cmd/vendor/github.com/google/pprof/profile/profile.go18
14 files changed, 892 insertions, 375 deletions
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
index c0661bf4aa..0c702398d3 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner.go
@@ -70,7 +70,11 @@ func (a *addr2LinerJob) write(s string) error {
}
func (a *addr2LinerJob) readLine() (string, error) {
- return a.out.ReadString('\n')
+ s, err := a.out.ReadString('\n')
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(s), nil
}
// close releases any resources used by the addr2liner object.
@@ -115,19 +119,11 @@ func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) {
return a, nil
}
-func (d *addr2Liner) readString() (string, error) {
- s, err := d.rw.readLine()
- if err != nil {
- return "", err
- }
- return strings.TrimSpace(s), nil
-}
-
// readFrame parses the addr2line output for a single address. It
// returns a populated plugin.Frame and whether it has reached the end of the
// data.
func (d *addr2Liner) readFrame() (plugin.Frame, bool) {
- funcname, err := d.readString()
+ funcname, err := d.rw.readLine()
if err != nil {
return plugin.Frame{}, true
}
@@ -135,12 +131,12 @@ func (d *addr2Liner) readFrame() (plugin.Frame, bool) {
// If addr2line returns a hex address we can assume it is the
// sentinel. Read and ignore next two lines of output from
// addr2line
- d.readString()
- d.readString()
+ d.rw.readLine()
+ d.rw.readLine()
return plugin.Frame{}, true
}
- fileline, err := d.readString()
+ fileline, err := d.rw.readLine()
if err != nil {
return plugin.Frame{}, true
}
@@ -186,7 +182,7 @@ func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) {
return nil, err
}
- resp, err := d.readString()
+ resp, err := d.rw.readLine()
if err != nil {
return nil, err
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
index 68fa5593ad..24c48e649b 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go
@@ -43,15 +43,21 @@ type llvmSymbolizerJob struct {
cmd *exec.Cmd
in io.WriteCloser
out *bufio.Reader
+ // llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization.
+ symType string
}
func (a *llvmSymbolizerJob) write(s string) error {
- _, err := fmt.Fprint(a.in, s+"\n")
+ _, err := fmt.Fprintln(a.in, a.symType, s)
return err
}
func (a *llvmSymbolizerJob) readLine() (string, error) {
- return a.out.ReadString('\n')
+ s, err := a.out.ReadString('\n')
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(s), nil
}
// close releases any resources used by the llvmSymbolizer object.
@@ -64,13 +70,17 @@ func (a *llvmSymbolizerJob) close() {
// information about the given executable file. If file is a shared
// library, base should be the address at which it was mapped in the
// program under consideration.
-func newLLVMSymbolizer(cmd, file string, base uint64) (*llvmSymbolizer, error) {
+func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) {
if cmd == "" {
cmd = defaultLLVMSymbolizer
}
j := &llvmSymbolizerJob{
- cmd: exec.Command(cmd, "-inlining", "-demangle=false"),
+ cmd: exec.Command(cmd, "-inlining", "-demangle=false"),
+ symType: "CODE",
+ }
+ if isData {
+ j.symType = "DATA"
}
var err error
@@ -97,19 +107,11 @@ func newLLVMSymbolizer(cmd, file string, base uint64) (*llvmSymbolizer, error) {
return a, nil
}
-func (d *llvmSymbolizer) readString() (string, error) {
- s, err := d.rw.readLine()
- if err != nil {
- return "", err
- }
- return strings.TrimSpace(s), nil
-}
-
// readFrame parses the llvm-symbolizer output for a single address. It
// returns a populated plugin.Frame and whether it has reached the end of the
// data.
func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) {
- funcname, err := d.readString()
+ funcname, err := d.rw.readLine()
if err != nil {
return plugin.Frame{}, true
}
@@ -121,13 +123,17 @@ func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) {
funcname = ""
}
- fileline, err := d.readString()
+ fileline, err := d.rw.readLine()
if err != nil {
return plugin.Frame{Func: funcname}, true
}
linenumber := 0
- if fileline == "??:0" {
+ // The llvm-symbolizer outputs the <file_name>:<line_number>:<column_number>.
+ // When it cannot identify the source code location, it outputs "??:0:0".
+ // Older versions output just the filename and line number, so we check for
+ // both conditions here.
+ if fileline == "??:0" || fileline == "??:0:0" {
fileline = ""
} else {
switch split := strings.Split(fileline, ":"); len(split) {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go
index 1987bd3dab..8e0ccc728d 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_nm.go
@@ -29,27 +29,42 @@ const (
defaultNM = "nm"
)
-// addr2LinerNM is a connection to an nm command for obtaining address
+// addr2LinerNM is a connection to an nm command for obtaining symbol
// information from a binary.
type addr2LinerNM struct {
- m []symbolInfo // Sorted list of addresses from binary.
+ m []symbolInfo // Sorted list of symbol addresses from binary.
}
type symbolInfo struct {
address uint64
+ size uint64
name string
+ symType string
}
-// newAddr2LinerNM starts the given nm command reporting information about the
-// given executable file. If file is a shared library, base should be
-// the address at which it was mapped in the program under
-// consideration.
+// isData returns whether the symbol has a known data object symbol type.
+func (s *symbolInfo) isData() bool {
+ // The following symbol types are taken from https://linux.die.net/man/1/nm:
+ // Lowercase letter means local symbol, uppercase denotes a global symbol.
+ // - b or B: the symbol is in the uninitialized data section, e.g. .bss;
+ // - d or D: the symbol is in the initialized data section;
+ // - r or R: the symbol is in a read only data section;
+ // - v or V: the symbol is a weak object;
+ // - W: the symbol is a weak symbol that has not been specifically tagged as a
+ // weak object symbol. Experiments with some binaries, showed these to be
+ // mostly data objects.
+ return strings.ContainsAny(s.symType, "bBdDrRvVW")
+}
+
+// newAddr2LinerNM starts the given nm command reporting information about the
+// given executable file. If file is a shared library, base should be the
+// address at which it was mapped in the program under consideration.
func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) {
if cmd == "" {
cmd = defaultNM
}
var b bytes.Buffer
- c := exec.Command(cmd, "-n", file)
+ c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file)
c.Stdout = &b
if err := c.Run(); err != nil {
return nil, err
@@ -74,17 +89,23 @@ func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) {
return nil, err
}
line = strings.TrimSpace(line)
- fields := strings.SplitN(line, " ", 3)
- if len(fields) != 3 {
+ fields := strings.Split(line, " ")
+ if len(fields) != 4 {
+ continue
+ }
+ address, err := strconv.ParseUint(fields[2], 16, 64)
+ if err != nil {
continue
}
- address, err := strconv.ParseUint(fields[0], 16, 64)
+ size, err := strconv.ParseUint(fields[3], 16, 64)
if err != nil {
continue
}
a.m = append(a.m, symbolInfo{
address: address + base,
- name: fields[2],
+ size: size,
+ name: fields[0],
+ symType: fields[1],
})
}
@@ -94,7 +115,7 @@ func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) {
// addrInfo returns the stack frame information for a specific program
// address. It returns nil if the address could not be identified.
func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) {
- if len(a.m) == 0 || addr < a.m[0].address || addr > a.m[len(a.m)-1].address {
+ if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) {
return nil, nil
}
@@ -113,12 +134,11 @@ func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) {
}
}
- // Address is between a.m[low] and a.m[high].
- // Pick low, as it represents [low, high).
- f := []plugin.Frame{
- {
- Func: a.m[low].name,
- },
+ // Address is between a.m[low] and a.m[high]. Pick low, as it represents
+ // [low, high). For data symbols, we use a strict check that the address is in
+ // the [start, start + size) range of a.m[low].
+ if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) {
+ return nil, nil
}
- return f, nil
+ return []plugin.Frame{{Func: a.m[low].name}}, nil
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go
index 4b67cc4ab0..576a6ee66a 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go
@@ -18,6 +18,7 @@ package binutils
import (
"debug/elf"
"debug/macho"
+ "debug/pe"
"encoding/binary"
"errors"
"fmt"
@@ -255,7 +256,7 @@ func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]
if !b.objdumpFound {
return nil, errors.New("cannot disasm: no objdump tool available")
}
- args := []string{"--disassemble-all", "--demangle", "--no-show-raw-insn",
+ args := []string{"--disassemble", "--demangle", "--no-show-raw-insn",
"--line-numbers", fmt.Sprintf("--start-address=%#x", start),
fmt.Sprintf("--stop-address=%#x", end)}
@@ -337,6 +338,15 @@ func (bu *Binutils) Open(name string, start, limit, offset uint64) (plugin.ObjFi
return f, nil
}
+ peMagic := string(header[:2])
+ if peMagic == "MZ" {
+ f, err := b.openPE(name, start, limit, offset)
+ if err != nil {
+ return nil, fmt.Errorf("error reading PE file %s: %v", name, err)
+ }
+ return f, nil
+ }
+
return nil, fmt.Errorf("unrecognized binary format: %s", name)
}
@@ -440,7 +450,23 @@ func (b *binrep) openELF(name string, start, limit, offset uint64) (plugin.ObjFi
}
}
- base, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), stextOffset, start, limit, offset)
+ var ph *elf.ProgHeader
+ // For user space executables, find the actual program segment that is
+ // associated with the given mapping. Skip this search if limit <= start.
+ // We cannot use just a check on the start address of the mapping to tell if
+ // it's a kernel / .ko module mapping, because with quipper address remapping
+ // enabled, the address would be in the lower half of the address space.
+ if stextOffset == nil && start < limit && limit < (uint64(1)<<63) {
+ ph, err = elfexec.FindProgHeaderForMapping(ef, offset, limit-start)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find program header for file %q, mapping pgoff %x, memsz=%x: %v", name, offset, limit-start, err)
+ }
+ } else {
+ // For the kernel, find the program segment that includes the .text section.
+ ph = elfexec.FindTextProgHeader(ef)
+ }
+
+ base, err := elfexec.GetBase(&ef.FileHeader, ph, stextOffset, start, limit, offset)
if err != nil {
return nil, fmt.Errorf("could not identify base for %s: %v", name, err)
}
@@ -451,10 +477,38 @@ func (b *binrep) openELF(name string, start, limit, offset uint64) (plugin.ObjFi
buildID = fmt.Sprintf("%x", id)
}
}
+ isData := ph != nil && ph.Flags&elf.PF_X == 0
+ if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
+ return &fileNM{file: file{b, name, base, buildID, isData}}, nil
+ }
+ return &fileAddr2Line{file: file{b, name, base, buildID, isData}}, nil
+}
+
+func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
+ pf, err := pe.Open(name)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing %s: %v", name, err)
+ }
+ defer pf.Close()
+
+ var imageBase uint64
+ switch h := pf.OptionalHeader.(type) {
+ case *pe.OptionalHeader32:
+ imageBase = uint64(h.ImageBase)
+ case *pe.OptionalHeader64:
+ imageBase = uint64(h.ImageBase)
+ default:
+ return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader)
+ }
+
+ var base uint64
+ if start > 0 {
+ base = start - imageBase
+ }
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
- return &fileNM{file: file{b, name, base, buildID}}, nil
+ return &fileNM{file: file{b: b, name: name, base: base}}, nil
}
- return &fileAddr2Line{file: file{b, name, base, buildID}}, nil
+ return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil
}
// file implements the binutils.ObjFile interface.
@@ -463,6 +517,7 @@ type file struct {
name string
base uint64
buildID string
+ isData bool
}
func (f *file) Name() string {
@@ -538,7 +593,7 @@ func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) {
}
func (f *fileAddr2Line) init() {
- if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base); err == nil {
+ if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil {
f.llvmSymbolizer = llvmSymbolizer
return
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
index d0be614bdc..e64adf58cd 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go
@@ -19,6 +19,7 @@ import (
"io"
"regexp"
"strconv"
+ "strings"
"github.com/google/pprof/internal/plugin"
"github.com/ianlancetaylor/demangle"
@@ -121,6 +122,7 @@ func disassemble(asm []byte) ([]plugin.Inst, error) {
break
}
}
+ input = strings.TrimSpace(input)
if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 {
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
@@ -167,6 +169,7 @@ func nextSymbol(buf *bytes.Buffer) (uint64, string, error) {
return 0, "", err
}
}
+ line = strings.TrimSpace(line)
if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 {
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go
index 878f2e1ead..3967a12d45 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go
@@ -163,7 +163,7 @@ func applyCommandOverrides(cmd string, outputFormat int, cfg config) config {
trim := cfg.Trim
switch cmd {
- case "disasm", "weblist":
+ case "disasm":
trim = false
cfg.Granularity = "addresses"
// Force the 'noinlines' mode so that source locations for a given address
@@ -172,6 +172,10 @@ func applyCommandOverrides(cmd string, outputFormat int, cfg config) config {
// This is because the merge is done by address and in case of an inlined
// stack each of the inlined entries is a separate callgraph node.
cfg.NoInlines = true
+ case "weblist":
+ trim = false
+ cfg.Granularity = "addresses"
+ cfg.NoInlines = false // Need inline info to support call expansion
case "peek":
trim = false
case "list":
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go
index 4f7610c7e5..b8e8b50b94 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go
@@ -62,6 +62,7 @@ a {
.header .title h1 {
font-size: 1.75em;
margin-right: 1rem;
+ margin-bottom: 4px;
}
.header .title a {
color: #212121;
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
index d520765cc9..3b3c6ee89f 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
@@ -283,3 +283,84 @@ func FindTextProgHeader(f *elf.File) *elf.ProgHeader {
}
return nil
}
+
+// FindProgHeaderForMapping returns the loadable program segment header that is
+// fully contained in the runtime mapping with file offset pgoff and memory size
+// memsz, or an error if the segment cannot be determined. The function returns
+// a nil program header and no error if the ELF binary has no loadable segments.
+func FindProgHeaderForMapping(f *elf.File, pgoff, memsz uint64) (*elf.ProgHeader, error) {
+ var headers []*elf.ProgHeader
+ loadables := 0
+ for _, p := range f.Progs {
+ if p.Type == elf.PT_LOAD && pgoff <= p.Off && p.Off+p.Memsz <= pgoff+memsz {
+ headers = append(headers, &p.ProgHeader)
+ }
+ if p.Type == elf.PT_LOAD {
+ loadables++
+ }
+ }
+ if len(headers) == 1 {
+ return headers[0], nil
+ }
+ // Some ELF files don't contain any program segments, e.g. .ko loadable kernel
+ // modules. Don't return an error in such cases.
+ if loadables == 0 {
+ return nil, nil
+ }
+ if len(headers) == 0 {
+ return nil, fmt.Errorf("no program header matches file offset %x and memory size %x", pgoff, memsz)
+ }
+
+ // Segments are mapped page aligned. In some cases, segments may be smaller
+ // than a page, which causes the next segment to start at a file offset that
+ // is logically on the same page if we were to align file offsets by page.
+ // Example:
+ // LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000
+ // 0x00000000000006fc 0x00000000000006fc R E 0x200000
+ // LOAD 0x0000000000000e10 0x0000000000600e10 0x0000000000600e10
+ // 0x0000000000000230 0x0000000000000238 RW 0x200000
+ //
+ // In this case, perf records the following mappings for this executable:
+ // 0 0 [0xc0]: PERF_RECORD_MMAP2 87867/87867: [0x400000(0x1000) @ 0 00:3c 512041 0]: r-xp exename
+ // 0 0 [0xc0]: PERF_RECORD_MMAP2 87867/87867: [0x600000(0x2000) @ 0 00:3c 512041 0]: rw-p exename
+ //
+ // Both mappings have file offset 0. The first mapping is one page length and
+ // it can include only the first loadable segment. Due to page alignment, the
+ // second mapping starts also at file offset 0, and it spans two pages. It can
+ // include both the first and the second loadable segments. We must return the
+ // correct program header to compute the correct base offset.
+ //
+ // We cannot use the mapping protections to distinguish between segments,
+ // because protections are not passed through to this function.
+ // We cannot use the start address to differentiate between segments, because
+ // with ASLR, the mapping start address can be any value.
+ //
+ // We use a heuristic to compute the minimum mapping size required for a
+ // segment, assuming mappings are 4k page aligned, and return the segment that
+ // matches the given mapping size.
+ const pageSize = 4096
+
+ // The memory size based heuristic makes sense only if the mapping size is a
+ // multiple of 4k page size.
+ if memsz%pageSize != 0 {
+ return nil, fmt.Errorf("mapping size = %x and %d segments match the passed in mapping", memsz, len(headers))
+ }
+
+ // Return an error if no segment, or multiple segments match the size, so we can debug.
+ var ph *elf.ProgHeader
+ pageMask := ^uint64(pageSize - 1)
+ for _, h := range headers {
+ wantSize := (h.Vaddr+h.Memsz+pageSize-1)&pageMask - (h.Vaddr & pageMask)
+ if wantSize != memsz {
+ continue
+ }
+ if ph != nil {
+ return nil, fmt.Errorf("found second program header (%#v) that matches memsz %x, first program header is %#v", *h, memsz, *ph)
+ }
+ ph = h
+ }
+ if ph == nil {
+ return nil, fmt.Errorf("found %d matching program headers, but none matches mapping size %x", len(headers), memsz)
+ }
+ return ph, nil
+}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
index 8cb87da9af..8008675248 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go
@@ -322,8 +322,8 @@ func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) {
}
// dotColor returns a color for the given score (between -1.0 and
-// 1.0), with -1.0 colored red, 0.0 colored grey, and 1.0 colored
-// green. If isBackground is true, then a light (low-saturation)
+// 1.0), with -1.0 colored green, 0.0 colored grey, and 1.0 colored
+// red. If isBackground is true, then a light (low-saturation)
// color is returned (suitable for use as a background color);
// otherwise, a darker color is returned (suitable for use as a
// foreground color).
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
index e95b261bc2..53325740a3 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
@@ -111,8 +111,9 @@ func compatibleValueTypes(v1, v2 *profile.ValueType) bool {
}
return v1.Unit == v2.Unit ||
- (isTimeUnit(v1.Unit) && isTimeUnit(v2.Unit)) ||
- (isMemoryUnit(v1.Unit) && isMemoryUnit(v2.Unit))
+ (timeUnits.sniffUnit(v1.Unit) != nil && timeUnits.sniffUnit(v2.Unit) != nil) ||
+ (memoryUnits.sniffUnit(v1.Unit) != nil && memoryUnits.sniffUnit(v2.Unit) != nil) ||
+ (gcuUnits.sniffUnit(v1.Unit) != nil && gcuUnits.sniffUnit(v2.Unit) != nil)
}
// Scale a measurement from an unit to a different unit and returns
@@ -124,12 +125,15 @@ func Scale(value int64, fromUnit, toUnit string) (float64, string) {
v, u := Scale(-value, fromUnit, toUnit)
return -v, u
}
- if m, u, ok := memoryLabel(value, fromUnit, toUnit); ok {
+ if m, u, ok := memoryUnits.convertUnit(value, fromUnit, toUnit); ok {
return m, u
}
- if t, u, ok := timeLabel(value, fromUnit, toUnit); ok {
+ if t, u, ok := timeUnits.convertUnit(value, fromUnit, toUnit); ok {
return t, u
}
+ if g, u, ok := gcuUnits.convertUnit(value, fromUnit, toUnit); ok {
+ return g, u
+ }
// Skip non-interesting units.
switch toUnit {
case "count", "sample", "unit", "minimum", "auto":
@@ -172,157 +176,121 @@ func Percentage(value, total int64) string {
}
}
-// isMemoryUnit returns whether a name is recognized as a memory size
-// unit.
-func isMemoryUnit(unit string) bool {
- switch strings.TrimSuffix(strings.ToLower(unit), "s") {
- case "byte", "b", "kilobyte", "kb", "megabyte", "mb", "gigabyte", "gb":
- return true
- }
- return false
+// unit includes a list of aliases representing a specific unit and a factor
+// by which one can multiply a value in the specified unit to get the value
+// in terms of the base unit.
+type unit struct {
+ canonicalName string
+ aliases []string
+ factor float64
}
-func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
- fromUnit = strings.TrimSuffix(strings.ToLower(fromUnit), "s")
- toUnit = strings.TrimSuffix(strings.ToLower(toUnit), "s")
-
- switch fromUnit {
- case "byte", "b":
- case "kb", "kbyte", "kilobyte":
- value *= 1024
- case "mb", "mbyte", "megabyte":
- value *= 1024 * 1024
- case "gb", "gbyte", "gigabyte":
- value *= 1024 * 1024 * 1024
- case "tb", "tbyte", "terabyte":
- value *= 1024 * 1024 * 1024 * 1024
- case "pb", "pbyte", "petabyte":
- value *= 1024 * 1024 * 1024 * 1024 * 1024
- default:
- return 0, "", false
- }
+// unitType includes a list of units that are within the same category (i.e.
+// memory or time units) and a default unit to use for this type of unit.
+type unitType struct {
+ defaultUnit unit
+ units []unit
+}
- if toUnit == "minimum" || toUnit == "auto" {
- switch {
- case value < 1024:
- toUnit = "b"
- case value < 1024*1024:
- toUnit = "kb"
- case value < 1024*1024*1024:
- toUnit = "mb"
- case value < 1024*1024*1024*1024:
- toUnit = "gb"
- case value < 1024*1024*1024*1024*1024:
- toUnit = "tb"
- default:
- toUnit = "pb"
+// findByAlias returns the unit associated with the specified alias. It returns
+// nil if the unit with such alias is not found.
+func (ut unitType) findByAlias(alias string) *unit {
+ for _, u := range ut.units {
+ for _, a := range u.aliases {
+ if alias == a {
+ return &u
+ }
}
}
-
- var output float64
- switch toUnit {
- default:
- output, toUnit = float64(value), "B"
- case "kb", "kbyte", "kilobyte":
- output, toUnit = float64(value)/1024, "kB"
- case "mb", "mbyte", "megabyte":
- output, toUnit = float64(value)/(1024*1024), "MB"
- case "gb", "gbyte", "gigabyte":
- output, toUnit = float64(value)/(1024*1024*1024), "GB"
- case "tb", "tbyte", "terabyte":
- output, toUnit = float64(value)/(1024*1024*1024*1024), "TB"
- case "pb", "pbyte", "petabyte":
- output, toUnit = float64(value)/(1024*1024*1024*1024*1024), "PB"
- }
- return output, toUnit, true
+ return nil
}
-// isTimeUnit returns whether a name is recognized as a time unit.
-func isTimeUnit(unit string) bool {
+// sniffUnit simplifies the input alias and returns the unit associated with the
+// specified alias. It returns nil if the unit with such alias is not found.
+func (ut unitType) sniffUnit(unit string) *unit {
unit = strings.ToLower(unit)
if len(unit) > 2 {
unit = strings.TrimSuffix(unit, "s")
}
-
- switch unit {
- case "nanosecond", "ns", "microsecond", "millisecond", "ms", "s", "second", "sec", "hr", "day", "week", "year":
- return true
- }
- return false
+ return ut.findByAlias(unit)
}
-func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bool) {
- fromUnit = strings.ToLower(fromUnit)
- if len(fromUnit) > 2 {
- fromUnit = strings.TrimSuffix(fromUnit, "s")
+// autoScale takes in the value with units of the base unit and returns
+// that value scaled to a reasonable unit if a reasonable unit is
+// found.
+func (ut unitType) autoScale(value float64) (float64, string, bool) {
+ var f float64
+ var unit string
+ for _, u := range ut.units {
+ if u.factor >= f && (value/u.factor) >= 1.0 {
+ f = u.factor
+ unit = u.canonicalName
+ }
}
-
- toUnit = strings.ToLower(toUnit)
- if len(toUnit) > 2 {
- toUnit = strings.TrimSuffix(toUnit, "s")
+ if f == 0 {
+ return 0, "", false
}
+ return value / f, unit, true
+}
- var d time.Duration
- switch fromUnit {
- case "nanosecond", "ns":
- d = time.Duration(value) * time.Nanosecond
- case "microsecond":
- d = time.Duration(value) * time.Microsecond
- case "millisecond", "ms":
- d = time.Duration(value) * time.Millisecond
- case "second", "sec", "s":
- d = time.Duration(value) * time.Second
- case "cycle":
- return float64(value), "", true
- default:
+// convertUnit converts a value from the fromUnit to the toUnit, autoscaling
+// the value if the toUnit is "minimum" or "auto". If the fromUnit is not
+// included in the unitType, then a false boolean will be returned. If the
+// toUnit is not in the unitType, the value will be returned in terms of the
+// default unitType.
+func (ut unitType) convertUnit(value int64, fromUnitStr, toUnitStr string) (float64, string, bool) {
+ fromUnit := ut.sniffUnit(fromUnitStr)
+ if fromUnit == nil {
return 0, "", false
}
-
- if toUnit == "minimum" || toUnit == "auto" {
- switch {
- case d < 1*time.Microsecond:
- toUnit = "ns"
- case d < 1*time.Millisecond:
- toUnit = "us"
- case d < 1*time.Second:
- toUnit = "ms"
- case d < 1*time.Minute:
- toUnit = "sec"
- case d < 1*time.Hour:
- toUnit = "min"
- case d < 24*time.Hour:
- toUnit = "hour"
- case d < 15*24*time.Hour:
- toUnit = "day"
- case d < 120*24*time.Hour:
- toUnit = "week"
- default:
- toUnit = "year"
+ v := float64(value) * fromUnit.factor
+ if toUnitStr == "minimum" || toUnitStr == "auto" {
+ if v, u, ok := ut.autoScale(v); ok {
+ return v, u, true
}
+ return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true
}
-
- var output float64
- dd := float64(d)
- switch toUnit {
- case "ns", "nanosecond":
- output, toUnit = dd/float64(time.Nanosecond), "ns"
- case "us", "microsecond":
- output, toUnit = dd/float64(time.Microsecond), "us"
- case "ms", "millisecond":
- output, toUnit = dd/float64(time.Millisecond), "ms"
- case "min", "minute":
- output, toUnit = dd/float64(time.Minute), "mins"
- case "hour", "hr":
- output, toUnit = dd/float64(time.Hour), "hrs"
- case "day":
- output, toUnit = dd/float64(24*time.Hour), "days"
- case "week", "wk":
- output, toUnit = dd/float64(7*24*time.Hour), "wks"
- case "year", "yr":
- output, toUnit = dd/float64(365*24*time.Hour), "yrs"
- default:
- // "sec", "second", "s" handled by default case.
- output, toUnit = dd/float64(time.Second), "s"
+ toUnit := ut.sniffUnit(toUnitStr)
+ if toUnit == nil {
+ return v / ut.defaultUnit.factor, ut.defaultUnit.canonicalName, true
}
- return output, toUnit, true
+ return v / toUnit.factor, toUnit.canonicalName, true
+}
+
+var memoryUnits = unitType{
+ units: []unit{
+ {"B", []string{"b", "byte"}, 1},
+ {"kB", []string{"kb", "kbyte", "kilobyte"}, float64(1 << 10)},
+ {"MB", []string{"mb", "mbyte", "megabyte"}, float64(1 << 20)},
+ {"GB", []string{"gb", "gbyte", "gigabyte"}, float64(1 << 30)},
+ {"TB", []string{"tb", "tbyte", "terabyte"}, float64(1 << 40)},
+ {"PB", []string{"pb", "pbyte", "petabyte"}, float64(1 << 50)},
+ },
+ defaultUnit: unit{"B", []string{"b", "byte"}, 1},
+}
+
+var timeUnits = unitType{
+ units: []unit{
+ {"ns", []string{"ns", "nanosecond"}, float64(time.Nanosecond)},
+ {"us", []string{"μs", "us", "microsecond"}, float64(time.Microsecond)},
+ {"ms", []string{"ms", "millisecond"}, float64(time.Millisecond)},
+ {"s", []string{"s", "sec", "second"}, float64(time.Second)},
+ {"hrs", []string{"hour", "hr"}, float64(time.Hour)},
+ },
+ defaultUnit: unit{"s", []string{}, float64(time.Second)},
+}
+
+var gcuUnits = unitType{
+ units: []unit{
+ {"n*GCU", []string{"nanogcu"}, 1e-9},
+ {"u*GCU", []string{"microgcu"}, 1e-6},
+ {"m*GCU", []string{"milligcu"}, 1e-3},
+ {"GCU", []string{"gcu"}, 1},
+ {"k*GCU", []string{"kilogcu"}, 1e3},
+ {"M*GCU", []string{"megagcu"}, 1e6},
+ {"G*GCU", []string{"gigagcu"}, 1e9},
+ {"T*GCU", []string{"teragcu"}, 1e12},
+ {"P*GCU", []string{"petagcu"}, 1e15},
+ },
+ defaultUnit: unit{"GCU", []string{}, 1.0},
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
index b480535439..4f841eff5d 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
@@ -24,12 +24,15 @@ import (
"io"
"os"
"path/filepath"
+ "regexp"
+ "sort"
"strconv"
"strings"
"github.com/google/pprof/internal/graph"
"github.com/google/pprof/internal/measurement"
"github.com/google/pprof/internal/plugin"
+ "github.com/google/pprof/profile"
)
// printSource prints an annotated source listing, include all
@@ -126,191 +129,554 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
return nil
}
+// sourcePrinter holds state needed for generating source+asm HTML listing.
+type sourcePrinter struct {
+ reader *sourceReader
+ objectTool plugin.ObjTool
+ objects map[string]plugin.ObjFile // Opened object files
+ sym *regexp.Regexp // May be nil
+ files map[string]*sourceFile // Set of files to print.
+ insts map[uint64]instructionInfo // Instructions of interest (keyed by address).
+
+ // Set of function names that we are interested in (because they had
+ // a sample and match sym).
+ interest map[string]bool
+
+ // Mapping from system function names to printable names.
+ prettyNames map[string]string
+}
+
+// instructionInfo holds collected information for an instruction.
+type instructionInfo struct {
+ objAddr uint64 // Address in object file (with base subtracted out)
+ length int // Instruction length in bytes
+ disasm string // Disassembly of instruction
+ file string // For top-level function in which instruction occurs
+ line int // For top-level function in which instruction occurs
+ flat, cum int64 // Samples to report (divisor already applied)
+}
+
+// sourceFile contains collected information for files we will print.
+type sourceFile struct {
+ fname string
+ cum int64
+ flat int64
+ lines map[int][]sourceInst // Instructions to show per line
+ funcName map[int]string // Function name per line
+}
+
+// sourceInst holds information for an instruction to be displayed.
+type sourceInst struct {
+ addr uint64
+ stack []callID // Inlined call-stack
+}
+
+// sourceFunction contains information for a contiguous range of lines per function we
+// will print.
+type sourceFunction struct {
+ name string
+ begin, end int // Line numbers (end is not included in the range)
+ flat, cum int64
+}
+
+// addressRange is a range of addresses plus the object file that contains it.
+type addressRange struct {
+ begin, end uint64
+ obj plugin.ObjFile
+ mapping *profile.Mapping
+ score int64 // Used to order ranges for processing
+}
+
// PrintWebList prints annotated source listing of rpt to w.
+// rpt.prof should contain inlined call info.
func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) error {
- o := rpt.options
- g := rpt.newGraph(nil)
+ sourcePath := rpt.options.SourcePath
+ if sourcePath == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf("could not stat current dir: %v", err)
+ }
+ sourcePath = wd
+ }
+ sp := newSourcePrinter(rpt, obj, sourcePath)
+ sp.print(w, maxFiles, rpt)
+ sp.close()
+ return nil
+}
+
+func newSourcePrinter(rpt *Report, obj plugin.ObjTool, sourcePath string) *sourcePrinter {
+ sp := &sourcePrinter{
+ reader: newSourceReader(sourcePath, rpt.options.TrimPath),
+ objectTool: obj,
+ objects: map[string]plugin.ObjFile{},
+ sym: rpt.options.Symbol,
+ files: map[string]*sourceFile{},
+ insts: map[uint64]instructionInfo{},
+ prettyNames: map[string]string{},
+ interest: map[string]bool{},
+ }
// If the regexp source can be parsed as an address, also match
// functions that land on that address.
var address *uint64
- if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
- address = &hex
+ if sp.sym != nil {
+ if hex, err := strconv.ParseUint(sp.sym.String(), 0, 64); err == nil {
+ address = &hex
+ }
}
- sourcePath := o.SourcePath
- if sourcePath == "" {
- wd, err := os.Getwd()
- if err != nil {
- return fmt.Errorf("could not stat current dir: %v", err)
+ addrs := map[uint64]bool{}
+ flat := map[uint64]int64{}
+ cum := map[uint64]int64{}
+
+ // Record an interest in the function corresponding to lines[index].
+ markInterest := func(addr uint64, lines []profile.Line, index int) {
+ fn := lines[index]
+ if fn.Function == nil {
+ return
}
- sourcePath = wd
+ sp.interest[fn.Function.Name] = true
+ sp.interest[fn.Function.SystemName] = true
+ addrs[addr] = true
}
- reader := newSourceReader(sourcePath, o.TrimPath)
- type fileFunction struct {
- fileName, functionName string
+ // See if sp.sym matches line.
+ matches := func(line profile.Line) bool {
+ if line.Function == nil {
+ return false
+ }
+ return sp.sym.MatchString(line.Function.Name) ||
+ sp.sym.MatchString(line.Function.SystemName) ||
+ sp.sym.MatchString(line.Function.Filename)
}
- // Extract interesting symbols from binary files in the profile and
- // classify samples per symbol.
- symbols := symbolsFromBinaries(rpt.prof, g, o.Symbol, address, obj)
- symNodes := nodesPerSymbol(g.Nodes, symbols)
+ // Extract sample counts and compute set of interesting functions.
+ for _, sample := range rpt.prof.Sample {
+ value := rpt.options.SampleValue(sample.Value)
+ if rpt.options.SampleMeanDivisor != nil {
+ div := rpt.options.SampleMeanDivisor(sample.Value)
+ if div != 0 {
+ value /= div
+ }
+ }
+
+ // Find call-sites matching sym.
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ loc := sample.Location[i]
+ for _, line := range loc.Line {
+ if line.Function == nil {
+ continue
+ }
+ sp.prettyNames[line.Function.SystemName] = line.Function.Name
+ }
- // Identify sources associated to a symbol by examining
- // symbol samples. Classify samples per source file.
- fileNodes := make(map[fileFunction]graph.Nodes)
- if len(symNodes) == 0 {
- for _, n := range g.Nodes {
- if n.Info.File == "" || !o.Symbol.MatchString(n.Info.Name) {
+ cum[loc.Address] += value
+ if i == 0 {
+ flat[loc.Address] += value
+ }
+
+ if sp.sym == nil || (address != nil && loc.Address == *address) {
+ // Interested in top-level entry of stack.
+ if len(loc.Line) > 0 {
+ markInterest(loc.Address, loc.Line, len(loc.Line)-1)
+ }
continue
}
- ff := fileFunction{n.Info.File, n.Info.Name}
- fileNodes[ff] = append(fileNodes[ff], n)
- }
- } else {
- for _, nodes := range symNodes {
- for _, n := range nodes {
- if n.Info.File != "" {
- ff := fileFunction{n.Info.File, n.Info.Name}
- fileNodes[ff] = append(fileNodes[ff], n)
+
+		// Search in inlined stack for a match.
+ matchFile := (loc.Mapping != nil && sp.sym.MatchString(loc.Mapping.File))
+ for j, line := range loc.Line {
+ if (j == 0 && matchFile) || matches(line) {
+ markInterest(loc.Address, loc.Line, j)
}
}
}
}
- if len(fileNodes) == 0 {
- return fmt.Errorf("no source information for %s", o.Symbol.String())
+ sp.expandAddresses(rpt, addrs, flat)
+ sp.initSamples(flat, cum)
+ return sp
+}
+
+func (sp *sourcePrinter) close() {
+ for _, objFile := range sp.objects {
+ if objFile != nil {
+ objFile.Close()
+ }
}
+}
+
+func (sp *sourcePrinter) expandAddresses(rpt *Report, addrs map[uint64]bool, flat map[uint64]int64) {
+ // We found interesting addresses (ones with non-zero samples) above.
+ // Get covering address ranges and disassemble the ranges.
+ ranges := sp.splitIntoRanges(rpt.prof, addrs, flat)
- sourceFiles := make(graph.Nodes, 0, len(fileNodes))
- for _, nodes := range fileNodes {
- sNode := *nodes[0]
- sNode.Flat, sNode.Cum = nodes.Sum()
- sourceFiles = append(sourceFiles, &sNode)
+ // Trim ranges if there are too many.
+ const maxRanges = 25
+ sort.Slice(ranges, func(i, j int) bool {
+ return ranges[i].score > ranges[j].score
+ })
+ if len(ranges) > maxRanges {
+ ranges = ranges[:maxRanges]
}
- // Limit number of files printed?
- if maxFiles < 0 {
- sourceFiles.Sort(graph.FileOrder)
- } else {
- sourceFiles.Sort(graph.FlatNameOrder)
- if maxFiles < len(sourceFiles) {
- sourceFiles = sourceFiles[:maxFiles]
+ for _, r := range ranges {
+ base := r.obj.Base()
+ insts, err := sp.objectTool.Disasm(r.mapping.File, r.begin-base, r.end-base,
+ rpt.options.IntelSyntax)
+ if err != nil {
+ // TODO(sanjay): Report that the covered addresses are missing.
+ continue
+ }
+
+ var lastFrames []plugin.Frame
+ var lastAddr, maxAddr uint64
+ for i, inst := range insts {
+ addr := inst.Addr + base
+
+ // Guard against duplicate output from Disasm.
+ if addr <= maxAddr {
+ continue
+ }
+ maxAddr = addr
+
+ length := 1
+ if i+1 < len(insts) && insts[i+1].Addr > inst.Addr {
+ // Extend to next instruction.
+ length = int(insts[i+1].Addr - inst.Addr)
+ }
+
+ // Get inlined-call-stack for address.
+ frames, err := r.obj.SourceLine(addr)
+ if err != nil {
+ // Construct a frame from disassembler output.
+ frames = []plugin.Frame{{Func: inst.Function, File: inst.File, Line: inst.Line}}
+ }
+
+ x := instructionInfo{objAddr: inst.Addr, length: length, disasm: inst.Text}
+ if len(frames) > 0 {
+ // We could consider using the outer-most caller's source
+				// location so we give some hint as to where the
+ // inlining happened that led to this instruction. So for
+ // example, suppose we have the following (inlined) call
+ // chains for this instruction:
+ // F1->G->H
+ // F2->G->H
+ // We could tag the instructions from the first call with
+ // F1 and instructions from the second call with F2. But
+ // that leads to a somewhat confusing display. So for now,
+ // we stick with just the inner-most location (i.e., H).
+ // In the future we will consider changing the display to
+ // make caller info more visible.
+ index := 0 // Inner-most frame
+ x.file = frames[index].File
+ x.line = frames[index].Line
+ }
+ sp.insts[addr] = x
+
+ // We sometimes get instructions with a zero reported line number.
+ // Make such instructions have the same line info as the preceding
+ // instruction, if an earlier instruction is found close enough.
+ const neighborhood = 32
+ if len(frames) > 0 && frames[0].Line != 0 {
+ lastFrames = frames
+ lastAddr = addr
+ } else if (addr-lastAddr <= neighborhood) && lastFrames != nil {
+ frames = lastFrames
+ }
+
+ // See if the stack contains a function we are interested in.
+ for i, f := range frames {
+ if !sp.interest[f.Func] {
+ continue
+ }
+
+ // Record sub-stack under frame's file/line.
+ fname := canonicalizeFileName(f.File)
+ file := sp.files[fname]
+ if file == nil {
+ file = &sourceFile{
+ fname: fname,
+ lines: map[int][]sourceInst{},
+ funcName: map[int]string{},
+ }
+ sp.files[fname] = file
+ }
+ callees := frames[:i]
+ stack := make([]callID, 0, len(callees))
+ for j := len(callees) - 1; j >= 0; j-- { // Reverse so caller is first
+ stack = append(stack, callID{
+ file: callees[j].File,
+ line: callees[j].Line,
+ })
+ }
+ file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack})
+
+ // Remember the first function name encountered per source line
+ // and assume that that line belongs to that function.
+ if _, ok := file.funcName[f.Line]; !ok {
+ file.funcName[f.Line] = f.Func
+ }
+ }
}
}
+}
- // Print each file associated with this function.
- for _, n := range sourceFiles {
- ff := fileFunction{n.Info.File, n.Info.Name}
- fns := fileNodes[ff]
+// splitIntoRanges converts the set of addresses we are interested in into a set of address
+// ranges to disassemble.
+func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, set map[uint64]bool, flat map[uint64]int64) []addressRange {
+ // List of mappings so we can stop expanding address ranges at mapping boundaries.
+ mappings := append([]*profile.Mapping{}, prof.Mapping...)
+ sort.Slice(mappings, func(i, j int) bool { return mappings[i].Start < mappings[j].Start })
- asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj, o.IntelSyntax)
- start, end := sourceCoordinates(asm)
+ var result []addressRange
+ addrs := make([]uint64, 0, len(set))
+ for addr := range set {
+ addrs = append(addrs, addr)
+ }
+ sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] })
- fnodes, path, err := getSourceFromFile(ff.fileName, reader, fns, start, end)
- if err != nil {
- fnodes, path = getMissingFunctionSource(ff.fileName, asm, start, end)
+ mappingIndex := 0
+ const expand = 500 // How much to expand range to pick up nearby addresses.
+ for i, n := 0, len(addrs); i < n; {
+ begin, end := addrs[i], addrs[i]
+ sum := flat[begin]
+ i++
+
+ // Advance to mapping containing addrs[i]
+ for mappingIndex < len(mappings) && mappings[mappingIndex].Limit <= begin {
+ mappingIndex++
+ }
+ if mappingIndex >= len(mappings) {
+ // TODO(sanjay): Report missed address and its samples.
+ break
+ }
+ m := mappings[mappingIndex]
+ obj := sp.objectFile(m)
+ if obj == nil {
+ // TODO(sanjay): Report missed address and its samples.
+ continue
}
- printFunctionHeader(w, ff.functionName, path, n.Flat, n.Cum, rpt)
- for _, fn := range fnodes {
- printFunctionSourceLine(w, fn, asm[fn.Info.Lineno], reader, rpt)
+ // Find following addresses that are close enough to addrs[i].
+ for i < n && addrs[i] <= end+2*expand && addrs[i] < m.Limit {
+ // When we expand ranges by "expand" on either side, the ranges
+ // for addrs[i] and addrs[i-1] will merge.
+ end = addrs[i]
+ sum += flat[end]
+ i++
}
- printFunctionClosing(w)
+ if m.Start-begin >= expand {
+ begin -= expand
+ } else {
+ begin = m.Start
+ }
+ if m.Limit-end >= expand {
+ end += expand
+ } else {
+ end = m.Limit
+ }
+
+ result = append(result, addressRange{begin, end, obj, m, sum})
}
- return nil
+ return result
}
-// sourceCoordinates returns the lowest and highest line numbers from
-// a set of assembly statements.
-func sourceCoordinates(asm map[int][]assemblyInstruction) (start, end int) {
- for l := range asm {
- if start == 0 || l < start {
- start = l
- }
- if end == 0 || l > end {
- end = l
+func (sp *sourcePrinter) initSamples(flat, cum map[uint64]int64) {
+ for addr, inst := range sp.insts {
+ // Move all samples that were assigned to the middle of an instruction to the
+ // beginning of that instruction. This takes care of samples that were recorded
+ // against pc+1.
+ instEnd := addr + uint64(inst.length)
+ for p := addr; p < instEnd; p++ {
+ inst.flat += flat[p]
+ inst.cum += cum[p]
}
+ sp.insts[addr] = inst
}
- return start, end
}
-// assemblyPerSourceLine disassembles the binary containing a symbol
-// and classifies the assembly instructions according to its
-// corresponding source line, annotating them with a set of samples.
-func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj plugin.ObjTool, intelSyntax bool) map[int][]assemblyInstruction {
- assembly := make(map[int][]assemblyInstruction)
- // Identify symbol to use for this collection of samples.
- o := findMatchingSymbol(objSyms, rs)
- if o == nil {
- return assembly
+func (sp *sourcePrinter) print(w io.Writer, maxFiles int, rpt *Report) {
+ // Finalize per-file counts.
+ for _, file := range sp.files {
+ seen := map[uint64]bool{}
+ for _, line := range file.lines {
+ for _, x := range line {
+ if seen[x.addr] {
+ // Same address can be displayed multiple times in a file
+ // (e.g., if we show multiple inlined functions).
+ // Avoid double-counting samples in this case.
+ continue
+ }
+ seen[x.addr] = true
+ inst := sp.insts[x.addr]
+ file.cum += inst.cum
+ file.flat += inst.flat
+ }
+ }
}
- // Extract assembly for matched symbol
- insts, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End, intelSyntax)
- if err != nil {
- return assembly
+ // Get sorted list of files to print.
+ var files []*sourceFile
+ for _, f := range sp.files {
+ files = append(files, f)
}
+ order := func(i, j int) bool { return files[i].flat > files[j].flat }
+ if maxFiles < 0 {
+ // Order by name for compatibility with old code.
+ order = func(i, j int) bool { return files[i].fname < files[j].fname }
+ maxFiles = len(files)
+ }
+ sort.Slice(files, order)
+ for i, f := range files {
+ if i < maxFiles {
+ sp.printFile(w, f, rpt)
+ }
+ }
+}
- srcBase := filepath.Base(src)
- anodes := annotateAssembly(insts, rs, o.base)
- var lineno = 0
- var prevline = 0
- for _, an := range anodes {
- // Do not rely solely on the line number produced by Disasm
- // since it is not what we want in the presence of inlining.
- //
- // E.g., suppose we are printing source code for F and this
- // instruction is from H where F called G called H and both
- // of those calls were inlined. We want to use the line
- // number from F, not from H (which is what Disasm gives us).
- //
- // So find the outer-most linenumber in the source file.
- found := false
- if frames, err := o.file.SourceLine(an.address + o.base); err == nil {
- for i := len(frames) - 1; i >= 0; i-- {
- if filepath.Base(frames[i].File) == srcBase {
- for j := i - 1; j >= 0; j-- {
- an.inlineCalls = append(an.inlineCalls, callID{frames[j].File, frames[j].Line})
- }
- lineno = frames[i].Line
- found = true
- break
+func (sp *sourcePrinter) printFile(w io.Writer, f *sourceFile, rpt *Report) {
+ for _, fn := range sp.functions(f) {
+ if fn.cum == 0 {
+ continue
+ }
+ printFunctionHeader(w, fn.name, f.fname, fn.flat, fn.cum, rpt)
+ var asm []assemblyInstruction
+ for l := fn.begin; l < fn.end; l++ {
+ lineContents, ok := sp.reader.line(f.fname, l)
+ if !ok {
+ if len(f.lines[l]) == 0 {
+ // Outside of range of valid lines and nothing to print.
+ continue
+ }
+ if l == 0 {
+ // Line number 0 shows up if line number is not known.
+ lineContents = "<instructions with unknown line numbers>"
+ } else {
+ // Past end of file, but have data to print.
+ lineContents = "???"
}
}
+
+ // Make list of assembly instructions.
+ asm = asm[:0]
+ var flatSum, cumSum int64
+ var lastAddr uint64
+ for _, inst := range f.lines[l] {
+ addr := inst.addr
+ x := sp.insts[addr]
+ flatSum += x.flat
+ cumSum += x.cum
+ startsBlock := (addr != lastAddr+uint64(sp.insts[lastAddr].length))
+ lastAddr = addr
+
+ // divisors already applied, so leave flatDiv,cumDiv as 0
+ asm = append(asm, assemblyInstruction{
+ address: x.objAddr,
+ instruction: x.disasm,
+ function: fn.name,
+ file: x.file,
+ line: x.line,
+ flat: x.flat,
+ cum: x.cum,
+ startsBlock: startsBlock,
+ inlineCalls: inst.stack,
+ })
+ }
+
+ printFunctionSourceLine(w, l, flatSum, cumSum, lineContents, asm, sp.reader, rpt)
+ }
+ printFunctionClosing(w)
+ }
+}
+
+// functions splits apart the lines to show in a file into a list of per-function ranges.
+func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction {
+ var funcs []sourceFunction
+
+ // Get interesting lines in sorted order.
+ lines := make([]int, 0, len(f.lines))
+ for l := range f.lines {
+ lines = append(lines, l)
+ }
+ sort.Ints(lines)
+
+ // Merge adjacent lines that are in same function and not too far apart.
+ const mergeLimit = 20
+ for _, l := range lines {
+ name := f.funcName[l]
+ if pretty, ok := sp.prettyNames[name]; ok {
+ // Use demangled name if available.
+ name = pretty
+ }
+
+ fn := sourceFunction{name: name, begin: l, end: l + 1}
+ for _, x := range f.lines[l] {
+ inst := sp.insts[x.addr]
+ fn.flat += inst.flat
+ fn.cum += inst.cum
}
- if !found && filepath.Base(an.file) == srcBase {
- lineno = an.line
+
+ // See if we should merge into preceding function.
+ if len(funcs) > 0 {
+ last := funcs[len(funcs)-1]
+ if l-last.end < mergeLimit && last.name == name {
+ last.end = l + 1
+ last.flat += fn.flat
+ last.cum += fn.cum
+ funcs[len(funcs)-1] = last
+ continue
+ }
}
- if lineno != 0 {
- if lineno != prevline {
- // This instruction starts a new block
- // of contiguous instructions on this line.
- an.startsBlock = true
+ // Add new function.
+ funcs = append(funcs, fn)
+ }
+
+ // Expand function boundaries to show neighborhood.
+ const expand = 5
+ for i, f := range funcs {
+ if i == 0 {
+ // Extend backwards, stopping at line number 1, but do not disturb 0
+ // since that is a special line number that can show up when addr2line
+ // cannot determine the real line number.
+ if f.begin > expand {
+ f.begin -= expand
+ } else if f.begin > 1 {
+ f.begin = 1
}
- prevline = lineno
- assembly[lineno] = append(assembly[lineno], an)
+ } else {
+ // Find gap from predecessor and divide between predecessor and f.
+ halfGap := (f.begin - funcs[i-1].end) / 2
+ if halfGap > expand {
+ halfGap = expand
+ }
+ funcs[i-1].end += halfGap
+ f.begin -= halfGap
}
+ funcs[i] = f
}
- return assembly
+ // Also extend the ending point of the last function.
+ if len(funcs) > 0 {
+ funcs[len(funcs)-1].end += expand
+ }
+
+ return funcs
}
-// findMatchingSymbol looks for the symbol that corresponds to a set
-// of samples, by comparing their addresses.
-func findMatchingSymbol(objSyms []*objSymbol, ns graph.Nodes) *objSymbol {
- for _, n := range ns {
- for _, o := range objSyms {
- if filepath.Base(o.sym.File) == filepath.Base(n.Info.Objfile) &&
- o.sym.Start <= n.Info.Address-o.base &&
- n.Info.Address-o.base <= o.sym.End {
- return o
- }
- }
+// objectFile returns the object for the named file, opening it if necessary.
+// It returns nil on error.
+func (sp *sourcePrinter) objectFile(m *profile.Mapping) plugin.ObjFile {
+ if object, ok := sp.objects[m.File]; ok {
+ return object // May be nil if we detected an error earlier.
}
- return nil
+ object, err := sp.objectTool.Open(m.File, m.Start, m.Limit, m.Offset)
+ if err != nil {
+ object = nil
+ }
+ sp.objects[m.File] = object // Cache even on error.
+ return object
}
// printHeader prints the page header for a weblist report.
@@ -348,22 +714,23 @@ func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64,
}
// printFunctionSourceLine prints a source line and the corresponding assembly.
-func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
+func printFunctionSourceLine(w io.Writer, lineNo int, flat, cum int64, lineContents string,
+ assembly []assemblyInstruction, reader *sourceReader, rpt *Report) {
if len(assembly) == 0 {
fmt.Fprintf(w,
"<span class=line> %6d</span> <span class=nop> %10s %10s %8s %s </span>\n",
- fn.Info.Lineno,
- valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt),
- "", template.HTMLEscapeString(fn.Info.Name))
+ lineNo,
+ valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+ "", template.HTMLEscapeString(lineContents))
return
}
fmt.Fprintf(w,
"<span class=line> %6d</span> <span class=deadsrc> %10s %10s %8s %s </span>",
- fn.Info.Lineno,
- valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt),
- "", template.HTMLEscapeString(fn.Info.Name))
- srcIndent := indentation(fn.Info.Name)
+ lineNo,
+ valueOrDot(flat, rpt), valueOrDot(cum, rpt),
+ "", template.HTMLEscapeString(lineContents))
+ srcIndent := indentation(lineContents)
fmt.Fprint(w, "<span class=asm>")
var curCalls []callID
for i, an := range assembly {
@@ -374,15 +741,9 @@ func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyIns
var fileline string
if an.file != "" {
- fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.file), an.line)
+ fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(filepath.Base(an.file)), an.line)
}
flat, cum := an.flat, an.cum
- if an.flatDiv != 0 {
- flat = flat / an.flatDiv
- }
- if an.cumDiv != 0 {
- cum = cum / an.cumDiv
- }
// Print inlined call context.
for j, c := range an.inlineCalls {
@@ -398,15 +759,18 @@ func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyIns
text := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline)
fmt.Fprintf(w, " %8s %10s %10s %8s <span class=inlinesrc>%s</span> <span class=unimportant>%s:%d</span>\n",
"", "", "", "",
- template.HTMLEscapeString(fmt.Sprintf("%-80s", text)),
+ template.HTMLEscapeString(rightPad(text, 80)),
template.HTMLEscapeString(filepath.Base(c.file)), c.line)
}
curCalls = an.inlineCalls
text := strings.Repeat(" ", srcIndent+4+4*len(curCalls)) + an.instruction
fmt.Fprintf(w, " %8s %10s %10s %8x: %s <span class=unimportant>%s</span>\n",
"", valueOrDot(flat, rpt), valueOrDot(cum, rpt), an.address,
- template.HTMLEscapeString(fmt.Sprintf("%-80s", text)),
- template.HTMLEscapeString(fileline))
+ template.HTMLEscapeString(rightPad(text, 80)),
+ // fileline should not be escaped since it was formed by appending
+ // line number (just digits) to an escaped file name. Escaping here
+ // would cause double-escaping of file name.
+ fileline)
}
fmt.Fprintln(w, "</span>")
}
@@ -482,36 +846,6 @@ func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start
return src, file, nil
}
-// getMissingFunctionSource creates a dummy function body to point to
-// the source file and annotates it with the samples in asm.
-func getMissingFunctionSource(filename string, asm map[int][]assemblyInstruction, start, end int) (graph.Nodes, string) {
- var fnodes graph.Nodes
- for i := start; i <= end; i++ {
- insts := asm[i]
- if len(insts) == 0 {
- continue
- }
- var group assemblyInstruction
- for _, insn := range insts {
- group.flat += insn.flat
- group.cum += insn.cum
- group.flatDiv += insn.flatDiv
- group.cumDiv += insn.cumDiv
- }
- flat := group.flatValue()
- cum := group.cumValue()
- fnodes = append(fnodes, &graph.Node{
- Info: graph.NodeInfo{
- Name: "???",
- Lineno: i,
- },
- Flat: flat,
- Cum: cum,
- })
- }
- return fnodes, filename
-}
-
// sourceReader provides access to source code with caching of file contents.
type sourceReader struct {
// searchPath is a filepath.ListSeparator-separated list of directories where
@@ -543,6 +877,7 @@ func (reader *sourceReader) fileError(path string) error {
return reader.errors[path]
}
+// line returns the line numbered "lineno" in path, or _,false if lineno is out of range.
func (reader *sourceReader) line(path string, lineno int) (string, bool) {
lines, ok := reader.files[path]
if !ok {
@@ -651,3 +986,37 @@ func indentation(line string) int {
}
return column
}
+
+// rightPad pads the input with spaces on the right-hand-side to make it have
+// at least width n. It treats tabs as enough spaces that lead to the next
+// 8-aligned tab-stop.
+func rightPad(s string, n int) string {
+ var str strings.Builder
+
+ // Convert tabs to spaces as we go so padding works regardless of what prefix
+ // is placed before the result.
+ column := 0
+ for _, c := range s {
+ column++
+ if c == '\t' {
+ str.WriteRune(' ')
+ for column%8 != 0 {
+ column++
+ str.WriteRune(' ')
+ }
+ } else {
+ str.WriteRune(c)
+ }
+ }
+ for column < n {
+ column++
+ str.WriteRune(' ')
+ }
+ return str.String()
+}
+
+func canonicalizeFileName(fname string) string {
+ fname = strings.TrimPrefix(fname, "/proc/self/cwd/")
+ fname = strings.TrimPrefix(fname, "./")
+ return filepath.Clean(fname)
+}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go
index 02a6d77248..26e8bdbba8 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go
@@ -25,12 +25,11 @@ func AddSourceTemplates(t *template.Template) {
}
const weblistPageCSS = `<style type="text/css">
-body {
+body #content{
font-family: sans-serif;
}
h1 {
font-size: 1.5em;
- margin-bottom: 4px;
}
.legend {
font-size: 1.25em;
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/src/cmd/vendor/github.com/google/pprof/profile/merge.go
index 4dcc27f48e..5ab6e9b9b0 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/merge.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/merge.go
@@ -35,7 +35,10 @@ func (p *Profile) Compact() *Profile {
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
-// profile.TimeNanos will be the earliest nonzero one.
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative with the caveat of the first profile having some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
if len(srcs) == 0 {
return nil, fmt.Errorf("no profiles to merge")
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
index d94d8b3d1c..2590c8ddb4 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
"path/filepath"
"regexp"
"sort"
@@ -712,7 +713,8 @@ func (s *Sample) DiffBaseSample() bool {
return s.HasLabel("pprof::base", "true")
}
-// Scale multiplies all sample values in a profile by a constant.
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
func (p *Profile) Scale(ratio float64) {
if ratio == 1 {
return
@@ -724,7 +726,8 @@ func (p *Profile) Scale(ratio float64) {
p.ScaleN(ratios)
}
-// ScaleN multiplies each sample values in a sample by a different amount.
+// ScaleN multiplies each sample values in a sample by a different amount
+// and keeps only samples that have at least one non-zero value.
func (p *Profile) ScaleN(ratios []float64) error {
if len(p.SampleType) != len(ratios) {
return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
@@ -739,13 +742,22 @@ func (p *Profile) ScaleN(ratios []float64) error {
if allOnes {
return nil
}
+ fillIdx := 0
for _, s := range p.Sample {
+ keepSample := false
for i, v := range s.Value {
if ratios[i] != 1 {
- s.Value[i] = int64(float64(v) * ratios[i])
+ val := int64(math.Round(float64(v) * ratios[i]))
+ s.Value[i] = val
+ keepSample = keepSample || val != 0
}
}
+ if keepSample {
+ p.Sample[fillIdx] = s
+ fillIdx++
+ }
}
+ p.Sample = p.Sample[:fillIdx]
return nil
}