about summary refs log tree commit diff
path: root/src/runtime/stack.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/stack.go')
-rw-r--r-- src/runtime/stack.go | 41
1 file changed, 4 insertions(+), 37 deletions(-)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 0f1a5c1c55..1bd7e87b2e 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -320,7 +320,7 @@ func stackcache_clear(c *mcache) {
// resources and must not split the stack.
//
//go:systemstack
-func stackalloc(n uint32) (stack, []stkbar) {
+func stackalloc(n uint32) stack {
// Stackalloc must be called on scheduler stack, so that we
// never try to grow the stack during the code that stackalloc runs.
// Doing so would cause a deadlock (issue 1547).
@@ -335,21 +335,12 @@ func stackalloc(n uint32) (stack, []stkbar) {
print("stackalloc ", n, "\n")
}
- // Compute the size of stack barrier array.
- maxstkbar := gcMaxStackBarriers(int(n))
- nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
- var stkbarSlice slice
-
if debug.efence != 0 || stackFromSystem != 0 {
v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
if v == nil {
throw("out of memory (stackalloc)")
}
- top := uintptr(n) - nstkbar
- if maxstkbar != 0 {
- stkbarSlice = slice{add(v, top), 0, maxstkbar}
- }
- return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
// Small stacks are allocated with a fixed-size free-list allocator.
@@ -415,11 +406,7 @@ func stackalloc(n uint32) (stack, []stkbar) {
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
- top := uintptr(n) - nstkbar
- if maxstkbar != 0 {
- stkbarSlice = slice{add(v, top), 0, maxstkbar}
- }
- return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
+ return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
// stackfree frees an n byte stack allocation at stk.
@@ -774,12 +761,6 @@ func adjustsudogs(gp *g, adjinfo *adjustinfo) {
}
}
-func adjuststkbar(gp *g, adjinfo *adjustinfo) {
- for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
- adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
- }
-}
-
func fillstack(stk stack, b byte) {
for p := stk.lo; p < stk.hi; p++ {
*(*byte)(unsafe.Pointer(p)) = b
@@ -866,7 +847,7 @@ func copystack(gp *g, newsize uintptr, sync bool) {
used := old.hi - gp.sched.sp
// allocate new stack
- new, newstkbar := stackalloc(uint32(newsize))
+ new := stackalloc(uint32(newsize))
if stackPoisonCopy != 0 {
fillstack(new, 0xfd)
}
@@ -900,39 +881,27 @@ func copystack(gp *g, newsize uintptr, sync bool) {
// Copy the stack (or the rest of it) to the new location
memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
- // Disallow sigprof scans of this stack and block if there's
- // one in progress.
- gcLockStackBarriers(gp)
-
// Adjust remaining structures that have pointers into stacks.
// We have to do most of these before we traceback the new
// stack because gentraceback uses them.
adjustctxt(gp, &adjinfo)
adjustdefers(gp, &adjinfo)
adjustpanics(gp, &adjinfo)
- adjuststkbar(gp, &adjinfo)
if adjinfo.sghi != 0 {
adjinfo.sghi += adjinfo.delta
}
- // copy old stack barriers to new stack barrier array
- newstkbar = newstkbar[:len(gp.stkbar)]
- copy(newstkbar, gp.stkbar)
-
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
oldsize := gp.stackAlloc
gp.stackAlloc = newsize
- gp.stkbar = newstkbar
gp.stktopsp += adjinfo.delta
// Adjust pointers in the new stack.
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
- gcUnlockStackBarriers(gp)
-
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
@@ -1132,8 +1101,6 @@ func shrinkstack(gp *g) {
stackfree(gp.stack, gp.stackAlloc)
gp.stack.lo = 0
gp.stack.hi = 0
- gp.stkbar = nil
- gp.stkbarPos = 0
}
return
}