author    Austin Clements <austin@google.com>    2017-03-16 14:46:53 -0400
committer Austin Clements <austin@google.com>    2017-04-13 18:20:35 +0000
commit    407c56ae9f40d3a83ba4e259c67ccbb58d2485b0 (patch)
tree      35963c69984756b7188fe468d2040524cd540889 /src/runtime
parent    ab9db51e1cf651c0d6d5b56dfd0d9d452f176726 (diff)
download  go-407c56ae9f40d3a83ba4e259c67ccbb58d2485b0.tar.xz
runtime: generalize {alloc,free}Stack to {alloc,free}Manual
We're going to start using manually-managed spans for GC workbufs, so
rename the allocate/free methods and pass in a pointer to the stats to
use instead of using the stack stats directly.

For #19325.

Change-Id: I37df0147ae5a8e1f3cb37d59c8e57a1fcc6f2980
Reviewed-on: https://go-review.googlesource.com/38576
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/HACKING.md  10
-rw-r--r--  src/runtime/mheap.go    41
-rw-r--r--  src/runtime/stack.go    12
3 files changed, 39 insertions, 24 deletions
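The mechanical change is that the span allocator no longer hard-codes memstats.stacks_inuse: each caller passes a pointer to the in-use counter it wants charged. A minimal, self-contained sketch of that pattern (the simplified types and the workbufInuse counter are illustrative, not the runtime's own):

```go
package main

import "fmt"

// span stands in for the runtime's mspan; npages is the span size in pages.
type span struct{ npages uintptr }

const pageSize = 8192

// allocManual models the renamed mheap.allocManual: the caller passes a
// pointer to whichever in-use counter should be charged for the span.
func allocManual(npages uintptr, stat *uint64) *span {
	*stat += uint64(npages * pageSize)
	return &span{npages: npages}
}

// freeManual models mheap.freeManual: it credits the same counter that
// was charged when the span was allocated.
func freeManual(s *span, stat *uint64) {
	*stat -= uint64(s.npages * pageSize)
}

func main() {
	var stacksInuse, workbufInuse uint64 // one in-use stat per subsystem

	stk := allocManual(4, &stacksInuse) // the stack allocator charges its own stat
	wb := allocManual(1, &workbufInuse) // a future workbuf caller charges another

	fmt.Println(stacksInuse, workbufInuse) // 32768 8192

	freeManual(stk, &stacksInuse)
	freeManual(wb, &workbufInuse)
	fmt.Println(stacksInuse, workbufInuse) // 0 0
}
```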
diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md
index ea7c5c128d..883559c690 100644
--- a/src/runtime/HACKING.md
+++ b/src/runtime/HACKING.md
@@ -238,11 +238,11 @@ go:notinheap
------------
`go:notinheap` applies to type declarations. It indicates that a type
-must never be heap allocated. Specifically, pointers to this type must
-always fail the `runtime.inheap` check. The type may be used for
-global variables, for stack variables, or for objects in unmanaged
-memory (e.g., allocated with `sysAlloc`, `persistentalloc`, or
-`fixalloc`). Specifically:
+must never be allocated from the GC'd heap. Specifically, pointers to
+this type must always fail the `runtime.inheap` check. The type may be
+used for global variables, for stack variables, or for objects in
+unmanaged memory (e.g., allocated with `sysAlloc`, `persistentalloc`,
+`fixalloc`, or from a manually-managed span). Specifically:
1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
allocation of T are disallowed. (Though implicit allocations are
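For context, the pragma is applied at a type declaration roughly as below. The type name here is made up, and //go:notinheap is only honored when compiling the runtime itself, so treat this as a sketch rather than a standalone program:

```go
// manualBuf is a hypothetical runtime-internal type that lives only in
// unmanaged memory (globals, stacks, or spans obtained from allocManual).
//
//go:notinheap
type manualBuf struct {
	next *manualBuf // pointers to notinheap types never need write barriers
	data [512]byte
}

// Disallowed by rule 1 above: new(manualBuf), make([]manualBuf, n), or
// anything else that would place a manualBuf in the GC'd heap.
```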
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 80349a9731..76e56828b6 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -664,11 +664,19 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
return s
}
-func (h *mheap) allocStack(npage uintptr) *mspan {
- _g_ := getg()
- if _g_ != _g_.m.g0 {
- throw("mheap_allocstack not on g0 stack")
- }
+// allocManual allocates a manually-managed span of npage pages and
+// adds the bytes used to *stat, which should be a memstats in-use
+// field. allocManual returns nil if allocation fails.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack to prevent stack
+// growth. Since this is used by the stack allocator, stack growth
+// during allocManual would self-deadlock.
+//
+//go:systemstack
+func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
lock(&h.lock)
s := h.allocSpanLocked(npage)
if s != nil {
@@ -679,10 +687,10 @@ func (h *mheap) allocStack(npage uintptr) *mspan {
s.nelems = 0
s.elemsize = 0
s.limit = s.base() + s.npages<<_PageShift
- memstats.stacks_inuse += uint64(s.npages << _PageShift)
+ *stat += uint64(s.npages << _PageShift)
}
- // This unlock acts as a release barrier. See mHeap_Alloc_m.
+ // This unlock acts as a release barrier. See mheap.alloc_m.
unlock(&h.lock)
return s
@@ -880,14 +888,21 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
})
}
-func (h *mheap) freeStack(s *mspan) {
- _g_ := getg()
- if _g_ != _g_.m.g0 {
- throw("mheap_freestack not on g0 stack")
- }
+// freeManual frees a manually-managed span returned by allocManual.
+// stat must be the same as the stat passed to the allocManual that
+// allocated s.
+//
+// This must only be called when gcphase == _GCoff. See mSpanState for
+// an explanation.
+//
+// freeManual must be called on the system stack to prevent stack
+// growth, just like allocManual.
+//
+//go:systemstack
+func (h *mheap) freeManual(s *mspan, stat *uint64) {
s.needzero = 1
lock(&h.lock)
- memstats.stacks_inuse -= uint64(s.npages << _PageShift)
+ *stat -= uint64(s.npages << _PageShift)
h.freeSpanLocked(s, true, true, 0)
unlock(&h.lock)
}
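Because both functions are marked //go:systemstack, a runtime caller that may not already be on g0 has to switch stacks around them. A rough sketch of that call shape, assuming a hypothetical counter myBufInuse (not a real memstats field) and compiling only as part of package runtime:

```go
var myBufInuse uint64 // hypothetical in-use counter for a new manual-span user

func allocMyBuf(npages uintptr) *mspan {
	var s *mspan
	// Switch to g0 so the stack cannot grow inside allocManual; the
	// stack allocator itself uses allocManual, so growth would
	// self-deadlock (see the doc comment above).
	systemstack(func() {
		s = mheap_.allocManual(npages, &myBufInuse)
	})
	if s == nil {
		throw("out of memory")
	}
	return s
}

func freeMyBuf(s *mspan) {
	systemstack(func() {
		mheap_.freeManual(s, &myBufInuse)
	})
}
```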
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 9e00edde61..562427a6a2 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -186,7 +186,7 @@ func stackpoolalloc(order uint8) gclinkptr {
s := list.first
if s == nil {
// no free stacks. Allocate another span worth.
- s = mheap_.allocStack(_StackCacheSize >> _PageShift)
+ s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
if s == nil {
throw("out of memory")
}
@@ -248,7 +248,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
// By not freeing, we prevent step #4 until GC is done.
stackpool[order].remove(s)
s.manualFreeList = 0
- mheap_.freeStack(s)
+ mheap_.freeManual(s, &memstats.stacks_inuse)
}
}
@@ -390,7 +390,7 @@ func stackalloc(n uint32) stack {
if s == nil {
// Allocate a new stack from the heap.
- s = mheap_.allocStack(npage)
+ s = mheap_.allocManual(npage, &memstats.stacks_inuse)
if s == nil {
throw("out of memory")
}
@@ -472,7 +472,7 @@ func stackfree(stk stack) {
if gcphase == _GCoff {
// Free the stack immediately if we're
// sweeping.
- mheap_.freeStack(s)
+ mheap_.freeManual(s, &memstats.stacks_inuse)
} else {
// If the GC is running, we can't return a
// stack span to the heap because it could be
@@ -1166,7 +1166,7 @@ func freeStackSpans() {
if s.allocCount == 0 {
list.remove(s)
s.manualFreeList = 0
- mheap_.freeStack(s)
+ mheap_.freeManual(s, &memstats.stacks_inuse)
}
s = next
}
@@ -1180,7 +1180,7 @@ func freeStackSpans() {
for s := stackLarge.free[i].first; s != nil; {
next := s.next
stackLarge.free[i].remove(s)
- mheap_.freeStack(s)
+ mheap_.freeManual(s, &memstats.stacks_inuse)
s = next
}
}