about summary refs log tree commit diff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
author: Russ Cox <rsc@golang.org> 2014-11-11 17:05:02 -0500
committer: Russ Cox <rsc@golang.org> 2014-11-11 17:05:02 -0500
commit: 1e2d2f09470a2be58a98420b4cecae731b156ee8 (patch)
tree: 4e787523cc5b7e166ff675e1d3da6b1da2207170 /src/runtime/malloc.go
parent: d98553a72782efb2d96c6d6f5e12869826a56779 (diff)
download: go-1e2d2f09470a2be58a98420b4cecae731b156ee8.tar.xz
[dev.cc] runtime: convert memory allocator and garbage collector to Go
The conversion was done with an automated tool and then modified only as necessary to make it compile and run. [This CL is part of the removal of C code from package runtime. See golang.org/s/dev.cc for an overview.] LGTM=r R=r CC=austin, dvyukov, golang-codereviews, iant, khr https://golang.org/cl/167540043
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--  src/runtime/malloc.go  54
1 file changed, 21 insertions, 33 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 8cf1c3d342..a11724500f 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -28,10 +28,11 @@ const (
maxGCMask = _MaxGCMask
bitsDead = _BitsDead
bitsPointer = _BitsPointer
+ bitsScalar = _BitsScalar
mSpanInUse = _MSpanInUse
- concurrentSweep = _ConcurrentSweep != 0
+ concurrentSweep = _ConcurrentSweep
)
// Page number (address>>pageShift)
@@ -142,10 +143,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[tinySizeClass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = tinySizeClass
- onM(mcacheRefill_m)
- releasem(mp)
+ onM(func() {
+ mCache_Refill(c, tinySizeClass)
+ })
s = c.alloc[tinySizeClass]
v = s.freelist
}
@@ -173,10 +173,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[sizeclass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = uintptr(sizeclass)
- onM(mcacheRefill_m)
- releasem(mp)
+ onM(func() {
+ mCache_Refill(c, int32(sizeclass))
+ })
s = c.alloc[sizeclass]
v = s.freelist
}
@@ -193,13 +192,10 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
}
c.local_cachealloc += intptr(size)
} else {
- mp := acquirem()
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(flags)
- onM(largeAlloc_m)
- s = (*mspan)(mp.ptrarg[0])
- mp.ptrarg[0] = nil
- releasem(mp)
+ var s *mspan
+ onM(func() {
+ s = largeAlloc(size, uint32(flags))
+ })
x = unsafe.Pointer(uintptr(s.start << pageShift))
size = uintptr(s.elemsize)
}
@@ -359,7 +355,7 @@ func newarray(typ *_type, n uintptr) unsafe.Pointer {
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
- if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) {
+ if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
panic("runtime: allocation size out of range")
}
return mallocgc(uintptr(typ.size)*n, typ, flags)
@@ -585,10 +581,9 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
ftyp := f._type
if ftyp == nil {
// switch to M stack and remove finalizer
- mp := acquirem()
- mp.ptrarg[0] = e.data
- onM(removeFinalizer_m)
- releasem(mp)
+ onM(func() {
+ removefinalizer(e.data)
+ })
return
}
@@ -633,18 +628,11 @@ okarg:
// make sure we have a finalizer goroutine
createfing()
- // switch to M stack to add finalizer record
- mp := acquirem()
- mp.ptrarg[0] = f.data
- mp.ptrarg[1] = e.data
- mp.scalararg[0] = nret
- mp.ptrarg[2] = unsafe.Pointer(fint)
- mp.ptrarg[3] = unsafe.Pointer(ot)
- onM(setFinalizer_m)
- if mp.scalararg[0] != 1 {
- gothrow("runtime.SetFinalizer: finalizer already set")
- }
- releasem(mp)
+ onM(func() {
+ if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
+ gothrow("runtime.SetFinalizer: finalizer already set")
+ }
+ })
}
// round n up to a multiple of a. a must be a power of 2.