about summary refs log tree commit diff
path: root/src/runtime/stack.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/stack.go')
-rw-r--r--  src/runtime/stack.go  109
1 file changed, 50 insertions(+), 59 deletions(-)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 93f9769899..b87aa0d61b 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -219,7 +219,7 @@ func stackpoolalloc(order uint8) gclinkptr {
// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
s := spanOfUnchecked(uintptr(x))
- if s.state != mSpanManual {
+ if s.state.get() != mSpanManual {
throw("freeing stack not in a stack span")
}
if s.manualFreeList.ptr() == nil {
@@ -467,7 +467,7 @@ func stackfree(stk stack) {
}
} else {
s := spanOfUnchecked(uintptr(v))
- if s.state != mSpanManual {
+ if s.state.get() != mSpanManual {
println(hex(s.base()), v)
throw("bad span state")
}
@@ -786,10 +786,6 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
}
// Lock channels to prevent concurrent send/receive.
- // It's important that we *only* do this for async
- // copystack; otherwise, gp may be in the middle of
- // putting itself on wait queues and this would
- // self-deadlock.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc {
@@ -826,12 +822,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
-//
-// If sync is true, this is a self-triggered stack growth and, in
-// particular, no other G may be writing to gp's stack (e.g., via a
-// channel operation). If sync is false, copystack protects against
-// concurrent channel operations.
-func copystack(gp *g, newsize uintptr, sync bool) {
+func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
@@ -857,15 +848,16 @@ func copystack(gp *g, newsize uintptr, sync bool) {
// Adjust sudogs, synchronizing with channel ops if necessary.
ncopy := used
- if sync {
+ if !gp.activeStackChans {
adjustsudogs(gp, &adjinfo)
} else {
- // sudogs can point in to the stack. During concurrent
- // shrinking, these areas may be written to. Find the
- // highest such pointer so we can handle everything
- // there and below carefully. (This shouldn't be far
- // from the bottom of the stack, so there's little
- // cost in handling everything below it carefully.)
+ // sudogs may be pointing in to the stack and gp has
+ // released channel locks, so other goroutines could
+ // be writing to gp's stack. Find the highest such
+ // pointer so we can handle everything there and below
+ // carefully. (This shouldn't be far from the bottom
+ // of the stack, so there's little cost in handling
+ // everything below it carefully.)
adjinfo.sghi = findsghi(gp, old)
// Synchronize with channel ops and copy the part of
@@ -916,7 +908,7 @@ func round2(x int32) int32 {
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
-// If the GC is trying to stop this g then it will set preemptscan to true.
+// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
@@ -983,7 +975,7 @@ func newstack() {
// it needs a lock held by the goroutine), that small preemption turns
// into a real deadlock.
if preempt {
- if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
+ if !canPreemptM(thisg.m) {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
gp.stackguard0 = gp.stack.lo + _StackGuard
@@ -1017,34 +1009,19 @@ func newstack() {
if thisg.m.p == 0 && thisg.m.locks == 0 {
throw("runtime: g is running but p is not")
}
- // Synchronize with scang.
- casgstatus(gp, _Grunning, _Gwaiting)
- if gp.preemptscan {
- for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
- // Likely to be racing with the GC as
- // it sees a _Gwaiting and does the
- // stack scan. If so, gcworkdone will
- // be set and gcphasework will simply
- // return.
- }
- if !gp.gcscandone {
- // gcw is safe because we're on the
- // system stack.
- gcw := &gp.m.p.ptr().gcw
- scanstack(gp, gcw)
- gp.gcscandone = true
- }
- gp.preemptscan = false
- gp.preempt = false
- casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
- // This clears gcscanvalid.
- casgstatus(gp, _Gwaiting, _Grunning)
- gp.stackguard0 = gp.stack.lo + _StackGuard
- gogo(&gp.sched) // never return
+
+ if gp.preemptShrink {
+ // We're at a synchronous safe point now, so
+ // do the pending stack shrink.
+ gp.preemptShrink = false
+ shrinkstack(gp)
+ }
+
+ if gp.preemptStop {
+ preemptPark(gp) // never returns
}
// Act like goroutine called runtime.Gosched.
- casgstatus(gp, _Gwaiting, _Grunning)
gopreempt_m(gp) // never return
}
@@ -1062,7 +1039,7 @@ func newstack() {
// The concurrent GC will not scan the stack while we are doing the copy since
// the gp is in a Gcopystack status.
- copystack(gp, newsize, true)
+ copystack(gp, newsize)
if stackDebug >= 1 {
print("stack grow done\n")
}
@@ -1087,16 +1064,36 @@ func gostartcallfn(gobuf *gobuf, fv *funcval) {
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
+// isShrinkStackSafe returns whether it's safe to attempt to shrink
+// gp's stack. Shrinking the stack is only safe when we have precise
+// pointer maps for all frames on the stack.
+func isShrinkStackSafe(gp *g) bool {
+ // We can't copy the stack if we're in a syscall.
+ // The syscall might have pointers into the stack and
+ // often we don't have precise pointer maps for the innermost
+ // frames.
+ return gp.syscallsp == 0
+}
+
// Maybe shrink the stack being used by gp.
-// Called at garbage collection time.
-// gp must be stopped, but the world need not be.
+//
+// gp must be stopped and we must own its stack. It may be in
+// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
- gstatus := readgstatus(gp)
if gp.stack.lo == 0 {
throw("missing stack in shrinkstack")
}
- if gstatus&_Gscan == 0 {
- throw("bad status in shrinkstack")
+ if s := readgstatus(gp); s&_Gscan == 0 {
+ // We don't own the stack via _Gscan. We could still
+ // own it if this is our own user G and we're on the
+ // system stack.
+ if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
+ // We don't own the stack.
+ throw("bad status in shrinkstack")
+ }
+ }
+ if !isShrinkStackSafe(gp) {
+ throw("shrinkstack at bad time")
}
// Check for self-shrinks while in a libcall. These may have
// pointers into the stack disguised as uintptrs, but these
@@ -1132,17 +1129,11 @@ func shrinkstack(gp *g) {
return
}
- // We can't copy the stack if we're in a syscall.
- // The syscall might have pointers into the stack.
- if gp.syscallsp != 0 {
- return
- }
-
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
- copystack(gp, newsize, false)
+ copystack(gp, newsize)
}
// freeStackSpans frees unused stack spans at the end of GC.