about summary refs log tree commit diff
path: root/src/pkg/runtime/runtime.h
diff options
context:
space:
mode:
author    Keith Randall <khr@golang.org>    2014-07-17 14:41:46 -0700
committer Keith Randall <khr@golang.org>    2014-07-17 14:41:46 -0700
commit    f378f300345431204d5842751db2add7994d9957 (patch)
tree      dc343a417545556013848a33fd49797d2197582e /src/pkg/runtime/runtime.h
parent    6b2aabeecc1db46d030b9c5c5553c4e0fabba0cf (diff)
download  go-f378f300345431204d5842751db2add7994d9957.tar.xz
undo CL 101570044 / 2c57aaea79c4
redo stack allocation. This is mostly the same as the original CL with a few bug fixes. 1. add racemalloc() for stack allocations 2. fix poolalloc/poolfree to terminate free lists correctly. 3. adjust span ref count correctly. 4. don't use cache for sizes >= StackCacheSize. Should fix bugs and memory leaks in original changelist. ««« original CL description undo CL 104200047 / 318b04f28372 Breaks windows and race detector. TBR=rsc ««« original CL description runtime: stack allocator, separate from mallocgc In order to move malloc to Go, we need to have a separate stack allocator. If we run out of stack during malloc, malloc will not be available to allocate a new stack. Stacks are the last remaining FlagNoGC objects in the GC heap. Once they are out, we can get rid of the distinction between the allocated/blockboundary bits. (This will be in a separate change.) Fixes #7468 Fixes #7424 LGTM=rsc, dvyukov R=golang-codereviews, dvyukov, khr, dave, rsc CC=golang-codereviews https://golang.org/cl/104200047 »»» TBR=rsc CC=golang-codereviews https://golang.org/cl/101570044 »»» LGTM=dvyukov R=dvyukov, dave, khr, alex.brainman CC=golang-codereviews https://golang.org/cl/112240044
Diffstat (limited to 'src/pkg/runtime/runtime.h')
-rw-r--r-- src/pkg/runtime/runtime.h | 15
1 file changed, 3 insertions, 12 deletions
diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h
index 2fab69b3a2..e06103abe3 100644
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -146,13 +146,6 @@ enum
{
PtrSize = sizeof(void*),
};
-enum
-{
- // Per-M stack segment cache size.
- StackCacheSize = 32,
- // Global <-> per-M stack segment cache transfer batch size.
- StackCacheBatch = 16,
-};
/*
* structures
*/
@@ -326,10 +319,6 @@ struct M
M* schedlink;
uint32 machport; // Return address for Mach IPC (OS X)
MCache* mcache;
- int32 stackinuse;
- uint32 stackcachepos;
- uint32 stackcachecnt;
- void* stackcache[StackCacheSize];
G* lockedg;
uintptr createstack[32];// Stack that created this thread.
uint32 freglo[16]; // D[i] lsb and F[i]
@@ -346,6 +335,8 @@ struct M
bool (*waitunlockf)(G*, void*);
void* waitlock;
uintptr forkstackguard;
+ uintptr scalararg[4]; // scalar argument/return for mcall
+ void* ptrarg[4]; // pointer argument/return for mcall
#ifdef GOOS_windows
void* thread; // thread handle
// these are here because they are too large to be on the stack
@@ -428,7 +419,6 @@ struct Stktop
uint8* argp; // pointer to arguments in old frame
bool panic; // is this frame the top of a panic?
- bool malloced;
};
struct SigTab
{
@@ -866,6 +856,7 @@ int32 runtime·funcarglen(Func*, uintptr);
int32 runtime·funcspdelta(Func*, uintptr);
int8* runtime·funcname(Func*);
int32 runtime·pcdatavalue(Func*, int32, uintptr);
+void runtime·stackinit(void);
void* runtime·stackalloc(G*, uint32);
void runtime·stackfree(G*, void*, Stktop*);
void runtime·shrinkstack(G*);