author    Rick Hudson <rlh@golang.org>    2016-02-08 12:36:23 -0500
committer Rick Hudson <rlh@golang.org>    2016-04-27 21:54:41 +0000
commit    e1c4e9a754833e169a41ea98a49c3712513879ab (patch)
tree      fa2c103b3522167925a7838d3225c674b7e634c2 /src/runtime
parent    aed861038f876643a67c2297b384b6be140c46c1 (diff)
download  go-e1c4e9a754833e169a41ea98a49c3712513879ab.tar.xz
[dev.garbage] runtime: refactor next free object
In preparation for changing how the next free object is chosen,
refactor and consolidate code into a single function.

Change-Id: I6836cd88ed7cbf0b2df87abd7c1c3b9fabc1cbd8
Reviewed-on: https://go-review.googlesource.com/19317
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/malloc.go  |  59
1 file changed, 30 insertions(+), 29 deletions(-)
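
For context, the shouldhelpgc flag that nextFree returns feeds the GC-trigger check near the end of mallocgc, which is not part of the hunks below. A minimal caller-side sketch of that pattern follows; checkGCTrigger and startGCCycle are hypothetical placeholders for the runtime's actual trigger test and background-collection start, not the real mallocgc code:

// Sketch only: sizeclass is assumed to be already computed, and checkGCTrigger
// and startGCCycle stand in for the runtime's real trigger logic.
v, shouldhelpgc := c.nextFree(sizeclass)
x := unsafe.Pointer(v)
// ... zero the object and set up heap bits as mallocgc normally does ...
if shouldhelpgc && checkGCTrigger() {
	// A refill is a heavy weight allocation, so consider starting a GC cycle.
	startGCCycle()
}

Consolidating this fast path into nextFree means the tiny-object and small-object branches of mallocgc no longer duplicate the refill-and-pop logic, so a later change to how the next free object is chosen only has to touch one place.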
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index fe13b8b9a3..70e7358e88 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -496,6 +496,32 @@ const (
_FlagNoZero = 1 << 1 // don't zero memory
)
+// nextFree returns the next free object from the cached span if one is available.
+// Otherwise it refills the cache with a span that has an available object and
+// returns that object along with a flag indicating that this was a heavy
+// weight allocation. If it is a heavy weight allocation the caller must
+// determine whether a new GC cycle needs to be started or, if the GC is active,
+// whether this goroutine needs to assist the GC.
+// https://golang.org/cl/5350 motivates why this routine should perform a
+// prefetch.
+func (c *mcache) nextFree(sizeclass int8) (v gclinkptr, shouldhelpgc bool) {
+ s := c.alloc[sizeclass]
+ v = s.freelist
+ if v.ptr() == nil {
+ systemstack(func() {
+ c.refill(int32(sizeclass))
+ })
+ shouldhelpgc = true
+ s = c.alloc[sizeclass]
+ v = s.freelist
+ }
+ s.freelist = v.ptr().next
+ s.ref++
+ // prefetchnta offers best performance, see change list message.
+ prefetchnta(uintptr(v.ptr().next))
+ return
+}
+
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
@@ -554,7 +580,6 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
shouldhelpgc := false
dataSize := size
c := gomcache()
- var s *mspan
var x unsafe.Pointer
if size <= maxSmallSize {
if flags&flagNoScan != 0 && size < maxTinySize {
@@ -606,20 +631,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
return x
}
// Allocate a new maxTinySize block.
- s = c.alloc[tinySizeClass]
- v := s.freelist
- if v.ptr() == nil {
- systemstack(func() {
- c.refill(tinySizeClass)
- })
- shouldhelpgc = true
- s = c.alloc[tinySizeClass]
- v = s.freelist
- }
- s.freelist = v.ptr().next
- s.ref++
- // prefetchnta offers best performance, see change list message.
- prefetchnta(uintptr(v.ptr().next))
+ var v gclinkptr
+ v, shouldhelpgc = c.nextFree(tinySizeClass)
x = unsafe.Pointer(v)
(*[2]uint64)(x)[0] = 0
(*[2]uint64)(x)[1] = 0
@@ -638,20 +651,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
sizeclass = size_to_class128[(size-1024+127)>>7]
}
size = uintptr(class_to_size[sizeclass])
- s = c.alloc[sizeclass]
- v := s.freelist
- if v.ptr() == nil {
- systemstack(func() {
- c.refill(int32(sizeclass))
- })
- shouldhelpgc = true
- s = c.alloc[sizeclass]
- v = s.freelist
- }
- s.freelist = v.ptr().next
- s.ref++
- // prefetchnta offers best performance, see change list message.
- prefetchnta(uintptr(v.ptr().next))
+ var v gclinkptr
+ v, shouldhelpgc = c.nextFree(sizeclass)
x = unsafe.Pointer(v)
if flags&flagNoZero == 0 {
v.ptr().next = 0