aboutsummaryrefslogtreecommitdiff
path: root/src/pkg/runtime/malloc.h
diff options
context:
space:
mode:
authorRuss Cox <rsc@golang.org>2010-02-10 00:00:12 -0800
committerRuss Cox <rsc@golang.org>2010-02-10 00:00:12 -0800
commitf25586a306cab6bf06fee66336ba77c0fac471c6 (patch)
treebae3dce937cbe88eefba982cdbafa536f8f7aa4a /src/pkg/runtime/malloc.h
parent0cba5fc051e07a25e51eb7eb16605d871859f116 (diff)
downloadgo-f25586a306cab6bf06fee66336ba77c0fac471c6.tar.xz
runtime: garbage collection + malloc performance
* add a bit tracking finalizer status, avoiding getfinalizer lookup * add the ability to allocate uncleared memory R=iant CC=golang-dev https://golang.org/cl/207044
Diffstat (limited to 'src/pkg/runtime/malloc.h')
-rw-r--r--src/pkg/runtime/malloc.h23
1 file changed, 18 insertions, 5 deletions
diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h
index 3a3b9bef6f..2d94872f77 100644
--- a/src/pkg/runtime/malloc.h
+++ b/src/pkg/runtime/malloc.h
@@ -67,10 +67,22 @@
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
+// The small objects on the MCache and MCentral free lists
+// may or may not be zeroed. They are zeroed if and only if
+// the second word of the object is zero. The spans in the
+// page heap are always zeroed. When a span full of objects
+// is returned to the page heap, the objects that need zeroing
+// are zeroed first. There are two main benefits to delaying the
+// zeroing this way:
+//
+// 1. stack frames allocated from the small object lists
+// can avoid zeroing altogether.
+// 2. the cost of zeroing when reusing a small object is
+// charged to the mutator, not the garbage collector.
+//
// This C code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).
-
typedef struct FixAlloc FixAlloc;
typedef struct MCentral MCentral;
typedef struct MHeap MHeap;
@@ -218,7 +230,7 @@ struct MCache
uint64 size;
};
-void* MCache_Alloc(MCache *c, int32 sizeclass, uintptr size);
+void* MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
void MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
@@ -285,7 +297,7 @@ struct MHeap
// span lookup
MHeapMap map;
MHeapMapCache mapcache;
-
+
// range of addresses we might see in the heap
byte *min;
byte *max;
@@ -310,7 +322,7 @@ void MHeap_Free(MHeap *h, MSpan *s);
MSpan* MHeap_Lookup(MHeap *h, PageID p);
MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
-void* mallocgc(uintptr size, uint32 flag, int32 dogc);
+void* mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
void gc(int32 force);
@@ -329,5 +341,6 @@ enum
RefNone, // no references
RefSome, // some references
RefFinalize, // ready to be finalized
- RefNoPointers = 0x80000000U, // flag - no pointers here
+ RefNoPointers = 0x80000000U, // flag - no pointers here
+ RefHasFinalizer = 0x40000000U, // flag - has finalizer
};