diff options
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2019-05-17 14:48:04 +0000 |
|---|---|---|
| committer | Michael Knyszek <mknyszek@google.com> | 2019-05-24 15:34:57 +0000 |
| commit | 7ed7669c0d35768dbb73eb33d7dc0098e45421b1 (patch) | |
| tree | 33210a5887e63784342843d563c604e3dbce1216 /src/runtime/export_test.go | |
| parent | db3255536cdbdba9d6f38da9c3bb1faf55f63277 (diff) | |
| download | go-7ed7669c0d35768dbb73eb33d7dc0098e45421b1.tar.xz | |
runtime: ensure mheap lock stack growth invariant is maintained
Currently there's an invariant in the runtime wherein the heap lock
can only be acquired on the system stack, otherwise a self-deadlock
could occur if the stack grows while the lock is held.
This invariant is upheld and documented in a number of situations (e.g.
allocManual, freeManual) but there are other places where the invariant
is either not maintained at all, which risks self-deadlock (e.g.
setGCPercent, gcResetMarkState, allocmcache) or is maintained but
undocumented (e.g. gcSweep, readGCStats_m).
This change adds go:systemstack to any function that acquires the heap
lock or adds a systemstack(func() { ... }) around the critical section,
where appropriate. It also documents the invariant on (*mheap).lock
directly and updates repetitive documentation to refer to that comment.
Fixes #32105.
Change-Id: I702b1290709c118b837389c78efde25c51a2cafb
Reviewed-on: https://go-review.googlesource.com/c/go/+/177857
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/export_test.go')
| -rw-r--r-- | src/runtime/export_test.go | 41 |
1 file changed, 26 insertions, 15 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 3c3e110f89..62b7730c44 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -545,18 +545,23 @@ type Span struct { } func AllocSpan(base, npages uintptr, scavenged bool) Span { - lock(&mheap_.lock) - s := (*mspan)(mheap_.spanalloc.alloc()) - unlock(&mheap_.lock) + var s *mspan + systemstack(func() { + lock(&mheap_.lock) + s = (*mspan)(mheap_.spanalloc.alloc()) + unlock(&mheap_.lock) + }) s.init(base, npages) s.scavenged = scavenged return Span{s} } func (s *Span) Free() { - lock(&mheap_.lock) - mheap_.spanalloc.free(unsafe.Pointer(s.mspan)) - unlock(&mheap_.lock) + systemstack(func() { + lock(&mheap_.lock) + mheap_.spanalloc.free(unsafe.Pointer(s.mspan)) + unlock(&mheap_.lock) + }) s.mspan = nil } @@ -629,9 +634,11 @@ func (t *Treap) Insert(s Span) { // allocation which requires the mheap_ lock to manipulate. // Locking here is safe because the treap itself never allocs // or otherwise ends up grabbing this lock. - lock(&mheap_.lock) - t.insert(s.mspan) - unlock(&mheap_.lock) + systemstack(func() { + lock(&mheap_.lock) + t.insert(s.mspan) + unlock(&mheap_.lock) + }) t.CheckInvariants() } @@ -644,17 +651,21 @@ func (t *Treap) Erase(i TreapIter) { // freeing which requires the mheap_ lock to manipulate. // Locking here is safe because the treap itself never allocs // or otherwise ends up grabbing this lock. - lock(&mheap_.lock) - t.erase(i.treapIter) - unlock(&mheap_.lock) + systemstack(func() { + lock(&mheap_.lock) + t.erase(i.treapIter) + unlock(&mheap_.lock) + }) t.CheckInvariants() } func (t *Treap) RemoveSpan(s Span) { // See Erase about locking. - lock(&mheap_.lock) - t.removeSpan(s.mspan) - unlock(&mheap_.lock) + systemstack(func() { + lock(&mheap_.lock) + t.removeSpan(s.mspan) + unlock(&mheap_.lock) + }) t.CheckInvariants() } |
