Diffstat (limited to 'src/pkg/runtime/mprof.go')
| -rw-r--r-- | src/pkg/runtime/mprof.go | 654 |
1 file changed, 379 insertions, 275 deletions
diff --git a/src/pkg/runtime/mprof.go b/src/pkg/runtime/mprof.go index 1acfdb9b89..8546a341bd 100644 --- a/src/pkg/runtime/mprof.go +++ b/src/pkg/runtime/mprof.go @@ -2,142 +2,185 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Malloc profiling. +// Patterned after tcmalloc's algorithms; shorter code. + package runtime import ( "unsafe" ) -// Malloc profiling. -// Patterned after tcmalloc's algorithms; shorter code. - // NOTE(rsc): Everything here could use cas if contention became an issue. var proflock mutex // All memory allocations are local and do not escape outside of the profiler. // The profiler is forbidden from referring to garbage-collected memory. -/* -enum { MProf, BProf } // profile types -*/ +const ( + // profile types + memProfile bucketType = 1 + iota + blockProfile -/* + // size of bucket hash table + buckHashSize = 179999 + + // max depth of stack to record in bucket + maxStack = 32 +) + +type bucketType int + +// A bucket holds per-call-stack profiling information. +// The representation is a bit sleazy, inherited from C. +// This struct defines the bucket header. It is followed in +// memory by the stack words and then the actual record +// data, either a memRecord or a blockRecord. +// // Per-call-stack profiling information. // Lookup by hashing call stack into a linked-list hash table. -struct Bucket -{ - Bucket *next // next in hash list - Bucket *allnext // next in list of all mbuckets/bbuckets - int32 typ - // Generally unions can break precise GC, - // this one is fine because it does not contain pointers. - union - { - struct MProfRecord // typ == MProf - { - // The following complex 3-stage scheme of stats accumulation - // is required to obtain a consistent picture of mallocs and frees - // for some point in time. - // The problem is that mallocs come in real time, while frees - // come only after a GC during concurrent sweeping. So if we would - // naively count them, we would get a skew toward mallocs. - // - // Mallocs are accounted in recent stats. - // Explicit frees are accounted in recent stats. - // GC frees are accounted in prev stats. - // After GC prev stats are added to final stats and - // recent stats are moved into prev stats. - uintptr allocs - uintptr frees - uintptr alloc_bytes - uintptr free_bytes +type bucket struct { + next *bucket + allnext *bucket + typ bucketType // memBucket or blockBucket + hash uintptr + size uintptr + nstk uintptr +} + +// A memRecord is the bucket data for a bucket of type memProfile, +// part of the memory profile. +type memRecord struct { + // The following complex 3-stage scheme of stats accumulation + // is required to obtain a consistent picture of mallocs and frees + // for some point in time. + // The problem is that mallocs come in real time, while frees + // come only after a GC during concurrent sweeping. So if we would + // naively count them, we would get a skew toward mallocs. + // + // Mallocs are accounted in recent stats. + // Explicit frees are accounted in recent stats. + // GC frees are accounted in prev stats. + // After GC prev stats are added to final stats and + // recent stats are moved into prev stats. 
+ allocs uintptr + frees uintptr + alloc_bytes uintptr + free_bytes uintptr - uintptr prev_allocs // since last but one till last gc - uintptr prev_frees - uintptr prev_alloc_bytes - uintptr prev_free_bytes + // changes between next-to-last GC and last GC + prev_allocs uintptr + prev_frees uintptr + prev_alloc_bytes uintptr + prev_free_bytes uintptr - uintptr recent_allocs // since last gc till now - uintptr recent_frees - uintptr recent_alloc_bytes - uintptr recent_free_bytes + // changes since last GC + recent_allocs uintptr + recent_frees uintptr + recent_alloc_bytes uintptr + recent_free_bytes uintptr +} - } mp - struct BProfRecord // typ == BProf - { - int64 count - int64 cycles - } bp - } data - uintptr hash // hash of size + stk - uintptr size - uintptr nstk - uintptr stk[1] +// A blockRecord is the bucket data for a bucket of type blockProfile, +// part of the blocking profile. +type blockRecord struct { + count int64 + cycles int64 } -*/ var ( - mbuckets *bucket // memory profile buckets - bbuckets *bucket // blocking profile buckets + mbuckets *bucket // memory profile buckets + bbuckets *bucket // blocking profile buckets + buckhash *[179999]*bucket + bucketmem uintptr ) -/* -enum { - BuckHashSize = 179999, +// newBucket allocates a bucket with the given type and number of stack entries. +func newBucket(typ bucketType, nstk int) *bucket { + size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0)) + switch typ { + default: + gothrow("invalid profile bucket type") + case memProfile: + size += unsafe.Sizeof(memRecord{}) + case blockProfile: + size += unsafe.Sizeof(blockRecord{}) + } + + b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys)) + bucketmem += size + b.typ = typ + b.nstk = uintptr(nstk) + return b } -static Bucket **buckhash -static uintptr bucketmem -*/ -/* -// Return the bucket for stk[0:nstk], allocating new bucket if needed. -static Bucket* -stkbucket(int32 typ, uintptr size, uintptr *stk, int32 nstk, bool alloc) -{ - int32 i - uintptr h - Bucket *b +// stk returns the slice in b holding the stack. +func (b *bucket) stk() []uintptr { + stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) + return stk[:b.nstk:b.nstk] +} + +// mp returns the memRecord associated with the memProfile bucket b. +func (b *bucket) mp() *memRecord { + if b.typ != memProfile { + gothrow("bad use of bucket.mp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*memRecord)(data) +} - if(buckhash == nil) { - buckhash = sysAlloc(BuckHashSize*sizeof buckhash[0], &mstats.buckhash_sys) - if(buckhash == nil) - throw("runtime: cannot allocate memory") +// bp returns the blockRecord associated with the blockProfile bucket b. +func (b *bucket) bp() *blockRecord { + if b.typ != blockProfile { + gothrow("bad use of bucket.bp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*blockRecord)(data) +} + +// Return the bucket for stk[0:nstk], allocating new bucket if needed. +func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { + if buckhash == nil { + buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys)) + if buckhash == nil { + gothrow("runtime: cannot allocate memory") + } } // Hash stack. 
- h = 0 - for(i=0 i<nstk i++) { - h += stk[i] - h += h<<10 - h ^= h>>6 + var h uintptr + for _, pc := range stk { + h += pc + h += h << 10 + h ^= h >> 6 } // hash in size h += size - h += h<<10 - h ^= h>>6 + h += h << 10 + h ^= h >> 6 // finalize - h += h<<3 - h ^= h>>11 + h += h << 3 + h ^= h >> 11 - i = h%BuckHashSize - for(b = buckhash[i] b b=b.next) - if(b.typ == typ && b.hash == h && b.size == size && b.nstk == nstk && - mcmp((byte*)b.stk, (byte*)stk, nstk*sizeof stk[0]) == 0) + i := int(h % buckHashSize) + for b := buckhash[i]; b != nil; b = b.next { + if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) { return b + } + } - if(!alloc) + if !alloc { return nil + } - b = persistentalloc(sizeof *b + nstk*sizeof stk[0], 0, &mstats.buckhash_sys) - bucketmem += sizeof *b + nstk*sizeof stk[0] - memmove(b.stk, stk, nstk*sizeof stk[0]) - b.typ = typ + // Create new bucket. + b := newBucket(typ, len(stk)) + copy(b.stk(), stk) b.hash = h b.size = size - b.nstk = nstk b.next = buckhash[i] buckhash[i] = b - if(typ == MProf) { + if typ == memProfile { b.allnext = mbuckets mbuckets = b } else { @@ -146,129 +189,193 @@ stkbucket(int32 typ, uintptr size, uintptr *stk, int32 nstk, bool alloc) } return b } -*/ + +func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer + +func eqslice(x, y []uintptr) bool { + if len(x) != len(y) { + return false + } + for i, xi := range x { + if xi != y[i] { + return false + } + } + return true +} func mprof_GC() { for b := mbuckets; b != nil; b = b.allnext { - b.data.mp.allocs += b.data.mp.prev_allocs - b.data.mp.frees += b.data.mp.prev_frees - b.data.mp.alloc_bytes += b.data.mp.prev_alloc_bytes - b.data.mp.free_bytes += b.data.mp.prev_free_bytes + mp := b.mp() + mp.allocs += mp.prev_allocs + mp.frees += mp.prev_frees + mp.alloc_bytes += mp.prev_alloc_bytes + mp.free_bytes += mp.prev_free_bytes - b.data.mp.prev_allocs = b.data.mp.recent_allocs - b.data.mp.prev_frees = b.data.mp.recent_frees - b.data.mp.prev_alloc_bytes = b.data.mp.recent_alloc_bytes - b.data.mp.prev_free_bytes = b.data.mp.recent_free_bytes + mp.prev_allocs = mp.recent_allocs + mp.prev_frees = mp.recent_frees + mp.prev_alloc_bytes = mp.recent_alloc_bytes + mp.prev_free_bytes = mp.recent_free_bytes - b.data.mp.recent_allocs = 0 - b.data.mp.recent_frees = 0 - b.data.mp.recent_alloc_bytes = 0 - b.data.mp.recent_free_bytes = 0 + mp.recent_allocs = 0 + mp.recent_frees = 0 + mp.recent_alloc_bytes = 0 + mp.recent_free_bytes = 0 } } -/* // Record that a gc just happened: all the 'recent' statistics are now real. -void -MProf_GC(void) -{ +func mProf_GC() { lock(&proflock) - MProf_GC() + mprof_GC() unlock(&proflock) } -*/ -/* // Called by malloc to record a profiled block. -void -MProf_Malloc(void *p, uintptr size) -{ - uintptr stk[32] - Bucket *b - int32 nstk - - nstk = callers(1, stk, nelem(stk)) +func mProf_Malloc(p unsafe.Pointer, size uintptr) { + var stk [maxStack]uintptr + nstk := callers(1, &stk[0], int32(len(stk))) lock(&proflock) - b = stkbucket(MProf, size, stk, nstk, true) - b.data.mp.recent_allocs++ - b.data.mp.recent_alloc_bytes += size + b := stkbucket(memProfile, size, stk[:nstk], true) + mp := b.mp() + mp.recent_allocs++ + mp.recent_alloc_bytes += size unlock(&proflock) // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock. // This reduces potential contention and chances of deadlocks. 
- // Since the object must be alive during call to MProf_Malloc, + // Since the object must be alive during call to mProf_Malloc, // it's fine to do this non-atomically. setprofilebucket(p, b) } -*/ -/* -void -MProf_Free(Bucket *b, uintptr size, bool freed) -{ +func setprofilebucket(p unsafe.Pointer, b *bucket) // mheap.c + +// Called when freeing a profiled block. +func mProf_Free(b *bucket, size uintptr, freed bool) { lock(&proflock) - if(freed) { - b.data.mp.recent_frees++ - b.data.mp.recent_free_bytes += size + mp := b.mp() + if freed { + mp.recent_frees++ + mp.recent_free_bytes += size } else { - b.data.mp.prev_frees++ - b.data.mp.prev_free_bytes += size + mp.prev_frees++ + mp.prev_free_bytes += size } unlock(&proflock) } -*/ -/* -int64 blockprofilerate // in CPU ticks -*/ +var blockprofilerate uint64 // in CPU ticks -/* -void -SetBlockProfileRate(intgo rate) -{ - int64 r - - if(rate <= 0) - r = 0 // disable profiling - else { +// SetBlockProfileRate controls the fraction of goroutine blocking events +// that are reported in the blocking profile. The profiler aims to sample +// an average of one blocking event per rate nanoseconds spent blocked. +// +// To include every blocking event in the profile, pass rate = 1. +// To turn off profiling entirely, pass rate <= 0. +func SetBlockProfileRate(rate int) { + var r int64 + if rate <= 0 { + r = 0 // disable profiling + } else { // convert ns to cycles, use float64 to prevent overflow during multiplication - r = (float64)rate*tickspersecond()/(1000*1000*1000) - if(r == 0) + r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000)) + if r == 0 { r = 1 + } } - atomicstore64((uint64*)&blockprofilerate, r) + + atomicstore64(&blockprofilerate, uint64(r)) } -*/ -/* -void -blockevent(int64 cycles, int32 skip) -{ - int32 nstk - int64 rate - uintptr stk[32] - Bucket *b +func tickspersecond() int64 // runtime.c +func fastrand1() uint32 // runtime.c +func readgstatus(*g) uint32 // proc.c - if(cycles <= 0) +func blockevent(cycles int64, skip int) { + if cycles <= 0 { return - rate = atomicload64((uint64*)&blockprofilerate) - if(rate <= 0 || (rate > cycles && fastrand1()%rate > cycles)) + } + rate := int64(atomicload64(&blockprofilerate)) + if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) { return - - if(g.m.curg == nil || g.m.curg == g) - nstk = callers(skip, stk, nelem(stk)) - else - nstk = gcallers(g.m.curg, skip, stk, nelem(stk)) + } + gp := getg() + var nstk int + var stk [maxStack]uintptr + if gp.m.curg == nil || gp.m.curg == gp { + nstk = int(callers(int32(skip), &stk[0], int32(len(stk)))) + } else { + nstk = int(gcallers(gp.m.curg, int32(skip), &stk[0], int32(len(stk)))) + } lock(&proflock) - b = stkbucket(BProf, 0, stk, nstk, true) - b.data.bp.count++ - b.data.bp.cycles += cycles + b := stkbucket(blockProfile, 0, stk[:nstk], true) + b.bp().count++ + b.bp().cycles += cycles unlock(&proflock) } -*/ // Go interface to profile data. +// A StackRecord describes a single execution stack. +type StackRecord struct { + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *StackRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfileRate controls the fraction of memory allocations +// that are recorded and reported in the memory profile. 
+// The profiler aims to sample an average of +// one allocation per MemProfileRate bytes allocated. +// +// To include every allocated block in the profile, set MemProfileRate to 1. +// To turn off profiling entirely, set MemProfileRate to 0. +// +// The tools that process the memory profiles assume that the +// profile rate is constant across the lifetime of the program +// and equal to the current value. Programs that change the +// memory profiling rate should do so just once, as early as +// possible in the execution of the program (for example, +// at the beginning of main). +var MemProfileRate int = 512 * 1024 + +// A MemProfileRecord describes the live objects allocated +// by a particular call sequence (stack trace). +type MemProfileRecord struct { + AllocBytes, FreeBytes int64 // number of bytes allocated, freed + AllocObjects, FreeObjects int64 // number of objects allocated, freed + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes). +func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes } + +// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects). +func (r *MemProfileRecord) InUseObjects() int64 { + return r.AllocObjects - r.FreeObjects +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *MemProfileRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + // MemProfile returns n, the number of records in the current memory profile. // If len(p) >= n, MemProfile copies the profile into p and returns n, true. // If len(p) < n, MemProfile does not change p and returns n, false. @@ -285,10 +392,11 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { lock(&proflock) clear := true for b := mbuckets; b != nil; b = b.allnext { - if inuseZero || b.data.mp.alloc_bytes != b.data.mp.free_bytes { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { n++ } - if b.data.mp.allocs != 0 || b.data.mp.frees != 0 { + if mp.allocs != 0 || mp.frees != 0 { clear = false } } @@ -301,7 +409,8 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { mprof_GC() n = 0 for b := mbuckets; b != nil; b = b.allnext { - if inuseZero || b.data.mp.alloc_bytes != b.data.mp.free_bytes { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { n++ } } @@ -310,7 +419,8 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { ok = true idx := 0 for b := mbuckets; b != nil; b = b.allnext { - if inuseZero || b.data.mp.alloc_bytes != b.data.mp.free_bytes { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { record(&p[idx], b) idx++ } @@ -322,31 +432,33 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { // Write b's data to r. 
func record(r *MemProfileRecord, b *bucket) { - r.AllocBytes = int64(b.data.mp.alloc_bytes) - r.FreeBytes = int64(b.data.mp.free_bytes) - r.AllocObjects = int64(b.data.mp.allocs) - r.FreeObjects = int64(b.data.mp.frees) - for i := 0; uintptr(i) < b.nstk && i < len(r.Stack0); i++ { - r.Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize)) - } - for i := b.nstk; i < uintptr(len(r.Stack0)); i++ { + mp := b.mp() + r.AllocBytes = int64(mp.alloc_bytes) + r.FreeBytes = int64(mp.free_bytes) + r.AllocObjects = int64(mp.allocs) + r.FreeObjects = int64(mp.frees) + copy(r.Stack0[:], b.stk()) + for i := int(b.nstk); i < len(r.Stack0); i++ { r.Stack0[i] = 0 } } -/* -void -iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr)) -{ - Bucket *b - +func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) { lock(&proflock) - for(b=mbuckets b b=b.allnext) { - callback(b, b.nstk, b.stk, b.size, b.data.mp.allocs, b.data.mp.frees) + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees) } unlock(&proflock) } -*/ + +// BlockProfileRecord describes blocking events originated +// at a particular call sequence (stack trace). +type BlockProfileRecord struct { + Count int64 + Cycles int64 + StackRecord +} // BlockProfile returns n, the number of records in the current blocking profile. // If len(p) >= n, BlockProfile copies the profile into p and returns n, true. @@ -362,21 +474,16 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { } if n <= len(p) { ok = true - idx := 0 for b := bbuckets; b != nil; b = b.allnext { - bp := (*bprofrecord)(unsafe.Pointer(&b.data)) - p[idx].Count = int64(bp.count) - p[idx].Cycles = int64(bp.cycles) - i := 0 - for uintptr(i) < b.nstk && i < len(p[idx].Stack0) { - p[idx].Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize)) - i++ + bp := b.bp() + r := &p[0] + r.Count = int64(bp.count) + r.Cycles = int64(bp.cycles) + i := copy(r.Stack0[:], b.stk()) + for ; i < len(r.Stack0); i++ { + r.Stack0[i] = 0 } - for i < len(p[idx].Stack0) { - p[idx].Stack0[i] = 0 - i++ - } - idx++ + p = p[1:] } } unlock(&proflock) @@ -407,53 +514,54 @@ func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { return } -/* -func GoroutineProfile(b Slice) (n int, ok bool) { - uintptr pc, sp, i - TRecord *r - G *gp +var allgs []*g // proc.c - sp = getcallersp(&b) - pc = (uintptr)getcallerpc(&b) +// GoroutineProfile returns n, the number of records in the active goroutine stack profile. +// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true. +// If len(p) < n, GoroutineProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package instead +// of calling GoroutineProfile directly. 
+func GoroutineProfile(p []StackRecord) (n int, ok bool) { + sp := getcallersp(unsafe.Pointer(&p)) + pc := getcallerpc(unsafe.Pointer(&p)) - ok = false - n = gcount() - if(n <= b.len) { + n = NumGoroutine() + if n <= len(p) { + gp := getg() semacquire(&worldsema, false) - g.m.gcing = 1 + gp.m.gcing = 1 stoptheworld() - n = gcount() - if(n <= b.len) { + n = NumGoroutine() + if n <= len(p) { ok = true - r = (TRecord*)b.array - saveg(pc, sp, g, r++) - for(i = 0 i < allglen i++) { - gp = allg[i] - if(gp == g || readgstatus(gp) == Gdead) + r := p + saveg(pc, sp, gp, &r[0]) + r = r[1:] + for _, gp1 := range allgs { + if gp1 == gp || readgstatus(gp1) == _Gdead { continue - saveg(~(uintptr)0, ~(uintptr)0, gp, r++) + } + saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) + r = r[1:] } } - g.m.gcing = 0 + gp.m.gcing = 0 semrelease(&worldsema) starttheworld() } -} -*/ -/* -static void -saveg(uintptr pc, uintptr sp, G *gp, TRecord *r) -{ - int32 n + return n, ok +} - n = gentraceback(pc, sp, 0, gp, 0, r.stk, nelem(r.stk), nil, nil, false) - if(n < nelem(r.stk)) - r.stk[n] = 0 +func saveg(pc, sp uintptr, gp *g, r *StackRecord) { + n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], int32(len(r.Stack0)), nil, nil, false) + if int(n) < len(r.Stack0) { + r.Stack0[n] = 0 + } } -*/ // Stack formats a stack trace of the calling goroutine into buf // and returns the number of bytes written to buf. @@ -495,56 +603,52 @@ func Stack(buf []byte, all bool) int { return n } -/* // Tracing of alloc/free/gc. -static Mutex tracelock +var tracelock mutex -void -tracealloc(void *p, uintptr size, Type *type) -{ +func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) { lock(&tracelock) - g.m.traceback = 2 - if(type == nil) - printf("tracealloc(%p, %p)\n", p, size) - else - printf("tracealloc(%p, %p, %S)\n", p, size, *type.string) - if(g.m.curg == nil || g == g.m.curg) { - goroutineheader(g) - traceback((uintptr)getcallerpc(&p), (uintptr)getcallersp(&p), 0, g) + gp := getg() + gp.m.traceback = 2 + if typ == nil { + print("tracealloc(", p, ", ", hex(size), ")\n") + } else { + print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n") + } + if gp.m.curg == nil || gp == gp.m.curg { + goroutineheader(gp) + traceback(getcallerpc(unsafe.Pointer(&p)), getcallersp(unsafe.Pointer(&p)), 0, gp) } else { - goroutineheader(g.m.curg) - traceback(~(uintptr)0, ~(uintptr)0, 0, g.m.curg) + goroutineheader(gp.m.curg) + traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg) } - printf("\n") - g.m.traceback = 0 + print("\n") + gp.m.traceback = 0 unlock(&tracelock) } -void -tracefree(void *p, uintptr size) -{ +func tracefree(p unsafe.Pointer, size uintptr) { lock(&tracelock) - g.m.traceback = 2 - printf("tracefree(%p, %p)\n", p, size) - goroutineheader(g) - traceback((uintptr)getcallerpc(&p), (uintptr)getcallersp(&p), 0, g) - printf("\n") - g.m.traceback = 0 + gp := getg() + gp.m.traceback = 2 + print("tracefree(", p, ", ", hex(size), ")\n") + goroutineheader(gp) + traceback(getcallerpc(unsafe.Pointer(&p)), getcallersp(unsafe.Pointer(&p)), 0, gp) + print("\n") + gp.m.traceback = 0 unlock(&tracelock) } -void -tracegc(void) -{ +func tracegc() { lock(&tracelock) - g.m.traceback = 2 - printf("tracegc()\n") - // running on m.g0 stack show all non-g0 goroutines - tracebackothers(g) - printf("end tracegc\n") - printf("\n") - g.m.traceback = 0 + gp := getg() + gp.m.traceback = 2 + print("tracegc()\n") + // running on m->g0 stack; show all non-g0 goroutines + tracebackothers(gp) + print("end tracegc\n") + print("\n") + gp.m.traceback = 0 
 	unlock(&tracelock)
 }
-*/
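
The new bucket type above is only a header: the stack PCs and then the memRecord or blockRecord payload live in the same persistentalloc'd block, laid out immediately after the struct, and stk, mp and bp recover them with pointer arithmetic. Below is a minimal user-space sketch of that header-plus-payload layout, not the runtime's code: the names header and memRec are hypothetical, a []uintptr allocation stands in for persistentalloc, and unsafe.Add plays the role of the runtime's add helper.

    package main

    import (
        "fmt"
        "unsafe"
    )

    const maxStack = 32

    type header struct{ nstk uintptr } // stand-in for the bucket header fields

    type memRec struct{ allocs, allocBytes uintptr } // stand-in for memRecord

    // stk returns the stack words stored immediately after the header.
    func (h *header) stk() []uintptr {
        p := unsafe.Add(unsafe.Pointer(h), unsafe.Sizeof(*h))
        return (*[maxStack]uintptr)(p)[:h.nstk:h.nstk]
    }

    // rec returns the record stored after the header and the stack words.
    func (h *header) rec() *memRec {
        p := unsafe.Add(unsafe.Pointer(h), unsafe.Sizeof(*h)+h.nstk*unsafe.Sizeof(uintptr(0)))
        return (*memRec)(p)
    }

    func main() {
        nstk := uintptr(3)
        size := unsafe.Sizeof(header{}) + nstk*unsafe.Sizeof(uintptr(0)) + unsafe.Sizeof(memRec{})
        // One flat allocation; using []uintptr keeps it pointer-aligned.
        words := make([]uintptr, (size+unsafe.Sizeof(uintptr(0))-1)/unsafe.Sizeof(uintptr(0)))
        h := (*header)(unsafe.Pointer(&words[0]))
        h.nstk = nstk
        copy(h.stk(), []uintptr{0x401000, 0x401100, 0x401200})
        h.rec().allocs = 1
        fmt.Println(h.stk(), h.rec().allocs)
    }

newBucket sizes its allocation the same way (header + nstk stack words + one record), which is why mp and bp may only be called on a bucket of the matching type.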
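stkbucket finds or creates a bucket by hashing the call stack and the allocation size with a simple add/shift/xor mix (in the style of the one-at-a-time hash) and then walking a single chain of the 179999-entry table, comparing type, hash, size and the full stack. The mixing step, pulled out as a standalone function for illustration (bucketHash is a hypothetical name, not part of the runtime):

    package main

    import "fmt"

    // bucketHash reproduces stkbucket's mixing: fold in each PC, then the
    // allocation size, then finalize. The chain is chosen with
    // bucketHash(...) % buckHashSize.
    func bucketHash(stk []uintptr, size uintptr) uintptr {
        var h uintptr
        for _, pc := range stk {
            h += pc
            h += h << 10
            h ^= h >> 6
        }
        h += size
        h += h << 10
        h ^= h >> 6
        h += h << 3
        h ^= h >> 11
        return h
    }

    func main() {
        stk := []uintptr{0x401000, 0x401100, 0x401200}
        fmt.Printf("%#x\n", bucketHash(stk, 64))
    }

Collisions only cost a longer chain: a bucket is reused solely when eqslice reports an exact stack match, so two different call sites never share statistics.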
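The three groups of counters in memRecord implement the scheme described in its comment: mallocs and explicit frees are added to the recent fields as they happen, frees discovered by the sweep go into the prev fields, and mprof_GC publishes prev into the cumulative totals while shifting recent back into prev. The toy model below (hypothetical type, one bucket, no locking) shows why the published numbers stay consistent: an allocation becomes visible only after the GC cycle that could also have swept it.

    package main

    import "fmt"

    // stats models one memRecord's counters.
    type stats struct {
        allocs, frees             uintptr // published: consistent as of the last GC
        prevAllocs, prevFrees     uintptr // changes between the next-to-last GC and the last GC
        recentAllocs, recentFrees uintptr // changes since the last GC
    }

    func (s *stats) malloc()    { s.recentAllocs++ } // counted in real time
    func (s *stats) sweepFree() { s.prevFrees++ }    // found while sweeping the previous cycle
    func (s *stats) gc() {
        // Publish the completed window, then shift the current one back.
        s.allocs += s.prevAllocs
        s.frees += s.prevFrees
        s.prevAllocs, s.prevFrees = s.recentAllocs, s.recentFrees
        s.recentAllocs, s.recentFrees = 0, 0
    }

    func main() {
        var s stats
        s.malloc()
        s.malloc()
        s.gc()        // the two mallocs sit in prev; nothing published yet
        s.sweepFree() // sweep frees one of them
        s.gc()        // now allocs=2, frees=1: a consistent pair
        fmt.Println(s.allocs, s.frees)
    }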
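SetBlockProfileRate stores the rate converted from nanoseconds into CPU ticks, and blockevent then keeps an event outright if it blocked for at least rate ticks, or with probability roughly cycles/rate for shorter events, which works out to about one recorded event per rate ticks spent blocked. A small restatement of that decision (keepBlockEvent is a hypothetical helper; math/rand stands in for the runtime's fastrand1):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // keepBlockEvent mirrors blockevent's sampling test: never sample when the
    // rate is off, always sample events blocked at least rate ticks, and sample
    // shorter events with probability cycles/rate.
    func keepBlockEvent(cycles, rate int64, rnd func() uint32) bool {
        if cycles <= 0 || rate <= 0 {
            return false
        }
        return rate <= cycles || int64(rnd())%rate <= cycles
    }

    func main() {
        // With rate = 1000 ticks, an event that blocked for 100 ticks should be
        // kept roughly 10% of the time.
        kept := 0
        for i := 0; i < 100000; i++ {
            if keepBlockEvent(100, 1000, rand.Uint32) {
                kept++
            }
        }
        fmt.Println(kept) // ≈ 10000
    }

From application code the knob is runtime.SetBlockProfileRate; the accumulated buckets are what the "block" profile in runtime/pprof reports.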
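MemProfile keeps the old contract: it reports n and copies only when the destination slice is large enough, so direct callers use the usual grow-and-retry loop (runtime/pprof does the same internally). A sketch of calling it directly with the real API, with a little slack since buckets can be added between the two calls:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // First call sizes the profile; then grow the slice until it fits.
        n, _ := runtime.MemProfile(nil, true)
        var p []runtime.MemProfileRecord
        for {
            p = make([]runtime.MemProfileRecord, n+50)
            var ok bool
            n, ok = runtime.MemProfile(p, true)
            if ok {
                p = p[:n]
                break
            }
        }
        var inUse int64
        for i := range p {
            inUse += p[i].InUseBytes()
        }
        fmt.Println(len(p), "records,", inUse, "bytes in use")
    }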
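BlockProfile follows the same n/ok convention, but most programs never call it directly: they set the rate and let runtime/pprof render the buckets. A minimal end-to-end example with the real APIs, where a deliberately contended mutex produces one blocking event:

    package main

    import (
        "os"
        "runtime"
        "runtime/pprof"
        "sync"
        "time"
    )

    func main() {
        runtime.SetBlockProfileRate(1) // record every blocking event

        // Manufacture one obvious blocking event: a mutex held across a sleep.
        var mu sync.Mutex
        mu.Lock()
        go func() {
            time.Sleep(50 * time.Millisecond)
            mu.Unlock()
        }()
        mu.Lock() // blocks ~50ms and ends up in a blockProfile bucket
        mu.Unlock()

        // debug=1 writes a human-readable dump of the buckets.
        pprof.Lookup("block").WriteTo(os.Stdout, 1)
    }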
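GoroutineProfile stops the world, records the caller's stack first and then one StackRecord for every other live goroutine, so like MemProfile it may need to be retried if goroutines are started in between; Stack(buf, all) is the textual variant of the same information. A sketch using the real APIs (runtime.CallersFrames is used here only to turn the raw PCs into names):

    package main

    import (
        "fmt"
        "os"
        "runtime"
    )

    func main() {
        // Binary form: one StackRecord per goroutine.
        n := runtime.NumGoroutine()
        var recs []runtime.StackRecord
        for {
            recs = make([]runtime.StackRecord, n+10)
            var ok bool
            n, ok = runtime.GoroutineProfile(recs)
            if ok {
                recs = recs[:n]
                break
            }
        }
        for _, r := range recs {
            frames := runtime.CallersFrames(r.Stack())
            f, _ := frames.Next()
            fmt.Println("goroutine stopped in", f.Function)
        }

        // Text form: the same goroutines, formatted like a crash traceback.
        buf := make([]byte, 1<<16)
        os.Stdout.Write(buf[:runtime.Stack(buf, true)])
    }

saveg zero-terminates short stacks, which is why Stack0 can simply be scanned up to the first 0 entry.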
