aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mprof.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/mprof.go')
-rw-r--r--  src/runtime/mprof.go  46
1 files changed, 23 insertions, 23 deletions
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index df0f2552af..b51a1ad3ce 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -40,24 +40,6 @@ const (
// size of bucket hash table
buckHashSize = 179999
- // maxStack is the max depth of stack to record in bucket.
- // Note that it's only used internally as a guard against
- // wildly out-of-bounds slicing of the PCs that come after
- // a bucket struct, and it could increase in the future.
- // The term "1" accounts for the first stack entry being
- // taken up by a "skip" sentinel value for profilers which
- // defer inline frame expansion until the profile is reported.
- // The term "maxSkip" is for frame pointer unwinding, where we
- // want to end up with maxLogicalStack frames but will discard
- // some "physical" frames to account for skipping.
- maxStack = 1 + maxSkip + maxLogicalStack
-
- // maxLogicalStack is the maximum stack size of a call stack
- // to encode in a profile. This counts "logical" frames, which
- // includes inlined frames. We may record more than this many
- // "physical" frames when using frame pointer unwinding to account
- // for deferred handling of skipping frames & inline expansion.
- maxLogicalStack = 128
// maxSkip is to account for deferred inline expansion
// when using frame pointer unwinding. We record the stack
// with "physical" frame pointers but handle skipping "logical"
@@ -67,6 +49,11 @@ const (
// This should be at least as large as the largest skip value
// used for profiling; otherwise stacks may be truncated inconsistently
maxSkip = 5
+
+ // maxProfStackDepth is the highest valid value for debug.profstackdepth.
+ // It's used for the bucket.stk func.
+ // TODO(fg): can we get rid of this?
+ maxProfStackDepth = 1024
)
type bucketType int
@@ -254,10 +241,11 @@ func newBucket(typ bucketType, nstk int) *bucket {
return b
}
-// stk returns the slice in b holding the stack.
+// stk returns the slice in b holding the stack. The caller can assume that the
+// backing array is immutable.
func (b *bucket) stk() []uintptr {
- stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
- if b.nstk > maxStack {
+ stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+ if b.nstk > maxProfStackDepth {
// prove that slicing works; otherwise a failure requires a P
throw("bad profile stack count")
}
@@ -455,7 +443,7 @@ func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
}
// Only use the part of mp.profStack we need and ignore the extra space
// reserved for delayed inline expansion with frame pointer unwinding.
- nstk := callers(4, mp.profStack[:maxLogicalStack])
+ nstk := callers(4, mp.profStack[:debug.profstackdepth])
index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
@@ -542,12 +530,18 @@ func blocksampled(cycles, rate int64) bool {
// skip should be positive if this event is recorded from the current stack
// (e.g. when this is not called from a system stack)
func saveblockevent(cycles, rate int64, skip int, which bucketType) {
+ if debug.profstackdepth == 0 {
+ // profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+ // can't record a stack trace.
+ return
+ }
if skip > maxSkip {
print("requested skip=", skip)
throw("invalid skip value")
}
gp := getg()
mp := acquirem() // we must not be preempted while accessing profstack
+
nstk := 1
if tracefpunwindoff() || gp.m.hasCgoOnStack() {
mp.profStack[0] = logicalStackSentinel
@@ -736,6 +730,12 @@ func (prof *mLockProfile) recordUnlock(l *mutex) {
}
func (prof *mLockProfile) captureStack() {
+ if debug.profstackdepth == 0 {
+ // profstackdepth is set to 0 by the user, so mp.profStack is nil and we
+ // can't record a stack trace.
+ return
+ }
+
skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank
if staticLockRanking {
// When static lock ranking is enabled, we'll always be on the system
@@ -780,7 +780,7 @@ func (prof *mLockProfile) store() {
mp := acquirem()
prof.disabled = true
- nstk := maxStack
+ nstk := int(debug.profstackdepth)
for i := 0; i < nstk; i++ {
if pc := prof.stack[i]; pc == 0 {
nstk = i