author     Austin Clements <austin@google.com>  2019-10-24 09:00:27 -0400
committer  Austin Clements <austin@google.com>  2019-10-31 17:09:48 +0000
commit     a9b37ae02604e03d2356b6143679d2a71bdd32a7 (patch)
tree       c60a24a4d12ae59fd6b1b564f8c0d7bd0037668f /src
parent     d5caea771e917e13c4d05a6d7c5a009bf719d299 (diff)
runtime: fully initialize span in alloc_m
Currently, several important fields of a heap span are set by
heapBits.initSpan, which happens after the span has already been
published and returned from the locked region of alloc_m. In
particular, allocBits is set very late, which makes mspan.isFree
unsafe even if you were to lock the heap, because it tries to access
allocBits.

This CL fixes this by populating these fields in alloc_m. The next CL
builds on this to only publish the span once it is fully initialized.
Together, they'll make it safe to check allocBits even if there is a
race with alloc_m.

For #10958, #24543, but a good fix in general.

Change-Id: I7fde90023af0f497e826b637efa4d19c32840c08
Reviewed-on: https://go-review.googlesource.com/c/go/+/203285
Run-TryBot: Austin Clements <austin@google.com>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
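To make the hazard concrete, here is a minimal, self-contained sketch of the publication race the message describes. The span type, the shape of isFree, and the `published` variable are illustrative stand-ins, not the runtime's actual declarations:

package main

import "fmt"

// span is a stand-in for runtime.mspan; only the field relevant to
// the race is modeled.
type span struct {
	allocBits []byte // in the old ordering, set long after publication
}

// isFree mirrors the shape of mspan.isFree: it indexes allocBits, so
// it panics (or races) if called before allocBits is populated.
func (s *span) isFree(i uintptr) bool {
	return s.allocBits[i/8]&(1<<(i%8)) == 0
}

var published *span // stand-in for the heap's span table

func main() {
	// Old ordering: the span becomes visible first, and
	// heapBits.initSpan fills in allocBits afterwards. A reader that
	// observes `published` in this window dereferences a nil slice.
	s := &span{}
	published = s
	// <-- published.isFree(0) here would panic: allocBits is still nil
	s.allocBits = make([]byte, 8)

	// Ordering after this CL: the bits exist before the span is ever
	// visible, so isFree is safe once the span can be found at all.
	s2 := &span{allocBits: make([]byte, 8)}
	published = s2
	fmt.Println(published.isFree(0)) // true: every slot still free
}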
Diffstat (limited to 'src')
-rw-r--r--  src/runtime/mbitmap.go | 16
-rw-r--r--  src/runtime/mheap.go   | 27
2 files changed, 28 insertions(+), 15 deletions(-)
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index d131bab600..9600cddac8 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -792,29 +792,19 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
- size, n, total := s.layout()
-
- // Init the markbit structures
- s.freeindex = 0
- s.allocCache = ^uint64(0) // all 1s indicating all free.
- s.nelems = n
- s.allocBits = nil
- s.gcmarkBits = nil
- s.gcmarkBits = newMarkBits(s.nelems)
- s.allocBits = newAllocBits(s.nelems)
-
// Clear bits corresponding to objects.
- nw := total / sys.PtrSize
+ nw := (s.npages << _PageShift) / sys.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
+ isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if isPtrs {
bitp := h.bitp
for i := uintptr(0); i < nbyte; i++ {
*bitp = bitPointerAll | bitScanAll
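Two things happen in this hunk: the markbit setup moves out of initSpan (it now happens in alloc_m, per the next file), and nw is computed directly from s.npages rather than via s.layout(); the isPtrs test can likewise read s.elemsize because alloc_m now sets it before initSpan runs. A quick sanity check of the word count; the constants below (8 KiB pages, 8-byte pointers, 4 heap words per bitmap byte) match a common 64-bit configuration but are assumptions here, not values taken from the diff:

package main

import "fmt"

const (
	pageShift          = 13 // assuming 8 KiB pages
	ptrSize            = 8  // assuming 64-bit pointers
	wordsPerBitmapByte = 4  // 2 bitmap bits per word => 4 words per byte
)

func main() {
	npages := uintptr(2)
	nbytes := npages << pageShift // 16384 bytes backing the span
	nw := nbytes / ptrSize        // 2048 heap words the bitmap must cover

	// This is the alignment the throw in initSpan enforces: the span
	// must map to whole bitmap bytes, never a fraction of one.
	fmt.Println(nw, nw%wordsPerBitmapByte == 0) // 2048 true
}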
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 3807050cbe..d9c8bbae7e 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1012,6 +1012,23 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
h.reclaim(npage)
}
+ // Compute size information.
+ nbytes := npage << _PageShift
+ var elemSize, nelems uintptr
+ if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+ elemSize = nbytes
+ nelems = 1
+ } else {
+ elemSize = uintptr(class_to_size[sizeclass])
+ nelems = nbytes / elemSize
+ }
+
+ // Allocate mark and allocation bits before we take the heap
+ // lock. We'll drop these on the floor if we fail to allocate
+ // the span, but in that case we'll panic soon.
+ gcmarkBits := newMarkBits(nelems)
+ allocBits := newAllocBits(nelems)
+
lock(&h.lock)
// transfer stats from cache to global
memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
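The size computation added above has two cases: sizeclass 0 means a large allocation where one object fills the whole span; otherwise the span is carved into fixed-size slots. A small sketch with illustrative inputs (8 KiB pages and a 48-byte slot size are assumptions, not values from the diff):

package main

import "fmt"

const pageShift = 13 // assuming 8 KiB pages

// spanGeometry mirrors the computation above; classSize stands in for
// class_to_size[spanclass.sizeclass()], with 0 meaning "large".
func spanGeometry(npage, classSize uintptr) (elemSize, nelems uintptr) {
	nbytes := npage << pageShift
	if classSize == 0 {
		return nbytes, 1 // one large object fills the span
	}
	return classSize, nbytes / classSize
}

func main() {
	fmt.Println(spanGeometry(1, 48)) // 48 170: 8192/48, 32-byte tail unused
	fmt.Println(spanGeometry(4, 0))  // 32768 1: one large object
}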
@@ -1028,14 +1045,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
s.state = mSpanInUse
s.allocCount = 0
s.spanclass = spanclass
+ s.elemsize = elemSize
if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
- s.elemsize = s.npages << _PageShift
s.divShift = 0
s.divMul = 0
s.divShift2 = 0
s.baseMask = 0
} else {
- s.elemsize = uintptr(class_to_size[sizeclass])
m := &class_to_divmagic[sizeclass]
s.divShift = m.shift
s.divMul = m.mul
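The divShift/divMul/divShift2/baseMask fields that remain in this branch implement division by elemsize without a hardware divide, so the runtime can map a byte offset to an object index cheaply (baseMask covers the power-of-two case with a plain mask and shift). A sketch of the underlying multiply-and-shift trick, simplified to a single multiply; the constants are derived locally for illustration and are not copied from class_to_divmagic:

package main

import "fmt"

func main() {
	const elemSize = 48       // a non-power-of-two slot size
	const spanBytes = 1 << 15 // offsets stay below 32 KiB (assumption)

	// Pick m and k so that (offset * m) >> k == offset / elemSize for
	// every offset that can occur within the span.
	const k = 32
	m := uint64(1<<k)/elemSize + 1

	for off := uint64(0); off < spanBytes; off++ {
		if (off*m)>>k != off/elemSize {
			fmt.Println("mismatch at", off)
			return
		}
	}
	fmt.Println("multiply-shift matches division for all offsets")
}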
@@ -1043,6 +1059,13 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
s.baseMask = m.baseMask
}
+ // Initialize mark and allocation structures.
+ s.freeindex = 0
+ s.allocCache = ^uint64(0) // all 1s indicating all free.
+ s.nelems = nelems
+ s.gcmarkBits = gcmarkBits
+ s.allocBits = allocBits
+
// Mark in-use span in arena page bitmap.
arena, pageIdx, pageMask := pageIndexOf(s.base())
arena.pageInUse[pageIdx] |= pageMask
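Putting the pieces together, here is a condensed sketch of alloc_m's shape after this CL. Types, names, and constants are illustrative stand-ins; stats updates, error paths, and most span fields are omitted:

package main

import (
	"fmt"
	"sync"
)

const pageShift = 13 // assuming 8 KiB pages

type span struct {
	elemsize, nelems uintptr
	freeindex        uintptr
	allocCache       uint64
	gcmarkBits       []byte
	allocBits        []byte
}

type heap struct {
	mu sync.Mutex
}

// newBits stands in for newMarkBits/newAllocBits: one bit per object.
func newBits(n uintptr) []byte { return make([]byte, (n+7)/8) }

// allocSpanLocked stands in for the span allocation done under h.mu.
func (h *heap) allocSpanLocked(npage uintptr) *span { return &span{} }

// allocSpan condenses alloc_m after this CL: size math and bitmap
// allocation happen before the heap lock, and every mark/alloc field
// is set while the lock is still held, so the span is never
// observable half-initialized.
func (h *heap) allocSpan(npage, classSize uintptr) *span {
	nbytes := npage << pageShift
	elemSize, nelems := nbytes, uintptr(1) // classSize == 0: large object
	if classSize != 0 {
		elemSize = classSize
		nelems = nbytes / elemSize
	}
	// Allocating the bitmaps can itself allocate memory, so do it
	// before taking the heap lock (matching the comment in the diff).
	gcmarkBits := newBits(nelems)
	allocBits := newBits(nelems)

	h.mu.Lock()
	defer h.mu.Unlock()
	s := h.allocSpanLocked(npage)
	s.elemsize = elemSize
	s.nelems = nelems
	s.freeindex = 0
	s.allocCache = ^uint64(0) // all 1s: every slot free
	s.gcmarkBits = gcmarkBits
	s.allocBits = allocBits // set before the span can escape the lock
	return s
}

func main() {
	h := &heap{}
	s := h.allocSpan(1, 48)           // one page of 48-byte slots (illustrative)
	fmt.Println(s.elemsize, s.nelems) // 48 170
}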