diff options
Diffstat (limited to 'src/runtime/stack.go')
| -rw-r--r-- | src/runtime/stack.go | 106 |
1 file changed, 82 insertions, 24 deletions
diff --git a/src/runtime/stack.go b/src/runtime/stack.go index d971e5e26f..b21c9c9518 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/abi" "internal/cpu" "runtime/internal/atomic" "runtime/internal/sys" @@ -91,6 +92,10 @@ const ( // The stack guard is a pointer this many bytes above the // bottom of the stack. + // + // The guard leaves enough room for one _StackSmall frame plus + // a _StackLimit chain of NOSPLIT calls plus _StackSystem + // bytes for the OS. _StackGuard = 928*sys.StackGuardMultiplier + _StackSystem // After a stack split check the SP is allowed to be this @@ -122,16 +127,21 @@ const ( const ( uintptrMask = 1<<(8*sys.PtrSize) - 1 + // The values below can be stored to g.stackguard0 to force + // the next stack check to fail. + // These are all larger than any real SP. + // Goroutine preemption request. - // Stored into g->stackguard0 to cause split stack check failure. - // Must be greater than any real sp. // 0xfffffade in hex. stackPreempt = uintptrMask & -1314 - // Thread is forking. - // Stored into g->stackguard0 to cause split stack check failure. - // Must be greater than any real sp. + // Thread is forking. Causes a split stack check failure. + // 0xfffffb2e in hex. stackFork = uintptrMask & -1234 + + // Force a stack movement. Used for debugging. + // 0xfffffeed in hex. + stackForceMove = uintptrMask & -275 ) // Global pool of spans that have free stacks. @@ -692,15 +702,15 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { // we call into morestack.) 
continue } - t := obj.typ - gcdata := t.gcdata + ptrdata := obj.ptrdata() + gcdata := obj.gcdata var s *mspan - if t.kind&kindGCProg != 0 { + if obj.useGCProg() { // See comments in mgcmark.go:scanstack - s = materializeGCProg(t.ptrdata, gcdata) + s = materializeGCProg(ptrdata, gcdata) gcdata = (*byte)(unsafe.Pointer(s.startAddr)) } - for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize { + for i := uintptr(0); i < ptrdata; i += sys.PtrSize { if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 { adjustpointer(adjinfo, unsafe.Pointer(p+i)) } @@ -1054,11 +1064,18 @@ func newstack() { // recheck the bounds on return.) if f := findfunc(gp.sched.pc); f.valid() { max := uintptr(funcMaxSPDelta(f)) - for newsize-oldsize < max+_StackGuard { + for newsize-gp.sched.sp < max+_StackGuard { newsize *= 2 } } + if gp.stackguard0 == stackForceMove { + // Forced stack movement used for debugging. + // Don't double the stack (or we may quickly run out + // if this is done repeatedly). + newsize = oldsize + } + if newsize > maxstacksize || newsize > maxstackceiling { if maxstacksize < maxstackceiling { print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n") @@ -1301,29 +1318,70 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args } // stack objects. - p := funcdata(f, _FUNCDATA_StackObjects) - if p != nil { - n := *(*uintptr)(p) - p = add(p, sys.PtrSize) - *(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)} - // Note: the noescape above is needed to keep - // getStackMap from "leaking param content: - // frame". That leak propagates up to getgcmask, then - // GCMask, then verifyGCInfo, which converts the stack - // gcinfo tests into heap gcinfo tests :( + if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil { + // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall. 
+ // We don't actually use argmap in this case, but we need to fake the stack object + // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset + // on amd64. + objs = methodValueCallFrameObjs + } else { + p := funcdata(f, _FUNCDATA_StackObjects) + if p != nil { + n := *(*uintptr)(p) + p = add(p, sys.PtrSize) + *(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)} + // Note: the noescape above is needed to keep + // getStackMap from "leaking param content: + // frame". That leak propagates up to getgcmask, then + // GCMask, then verifyGCInfo, which converts the stack + // gcinfo tests into heap gcinfo tests :( + } } return } +var ( + abiRegArgsEface interface{} = abi.RegArgs{} + abiRegArgsType *_type = efaceOf(&abiRegArgsEface)._type + methodValueCallFrameObjs = []stackObjectRecord{ + { + off: -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local. + size: int32(abiRegArgsType.size), + _ptrdata: int32(abiRegArgsType.ptrdata), + gcdata: abiRegArgsType.gcdata, + }, + } +) + +func init() { + if abiRegArgsType.kind&kindGCProg != 0 { + throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs") + } +} + // A stackObjectRecord is generated by the compiler for each stack object in a stack frame. -// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects. +// This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects. 
type stackObjectRecord struct { // offset in frame // if negative, offset from varp // if non-negative, offset from argp - off int - typ *_type + off int32 + size int32 + _ptrdata int32 // ptrdata, or -ptrdata if GC prog is used + gcdata *byte // pointer map or GC prog of the type +} + +func (r *stackObjectRecord) useGCProg() bool { + return r._ptrdata < 0 +} + +func (r *stackObjectRecord) ptrdata() uintptr { + x := r._ptrdata + if x < 0 { + return uintptr(-x) + } + return uintptr(x) } // This is exported as ABI0 via linkname so obj can call it. |
