aboutsummaryrefslogtreecommitdiff
path: root/src/pkg/runtime/proc.c
diff options
context:
space:
mode:
authorRuss Cox <rsc@golang.org>2013-06-12 08:49:38 -0400
committerRuss Cox <rsc@golang.org>2013-06-12 08:49:38 -0400
commite58f798c0c97c252e0b800a4223d20790ef6b166 (patch)
tree0a5611f4e3ea22708feef51a33c8862bf0e773e7 /src/pkg/runtime/proc.c
parent7ea75a5f188ff23fee5130199e89408c52ee59d1 (diff)
downloadgo-e58f798c0c97c252e0b800a4223d20790ef6b166.tar.xz
runtime: adjust traceback / garbage collector boundary
The garbage collection routine addframeroots is duplicating logic in the traceback routine that calls it, sometimes correctly, sometimes incorrectly, sometimes incompletely. Pass necessary information to addframeroots instead of deriving it anew. Should make addframeroots significantly more robust. It's certainly smaller. Also try to standardize on uintptr for saved pc, sp values. Will make CL 10036044 trivial. R=golang-dev, dave, dvyukov CC=golang-dev https://golang.org/cl/10169045
Diffstat (limited to 'src/pkg/runtime/proc.c')
-rw-r--r--src/pkg/runtime/proc.c20
1 file changed, 10 insertions, 10 deletions
diff --git a/src/pkg/runtime/proc.c b/src/pkg/runtime/proc.c
index bc9ca50dc7..432298a9ca 100644
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -241,7 +241,7 @@ runtime·tracebackothers(G *me)
continue;
runtime·printf("\n");
runtime·goroutineheader(gp);
- runtime·traceback(gp->sched.pc, (byte*)gp->sched.sp, 0, gp);
+ runtime·traceback(gp->sched.pc, gp->sched.sp, 0, gp);
}
}
@@ -473,7 +473,7 @@ runtime·mstart(void)
// Once we call schedule we're never coming back,
// so other calls can reuse this stack space.
runtime·gosave(&m->g0->sched);
- m->g0->sched.pc = (void*)-1; // make sure it is never used
+ m->g0->sched.pc = (uintptr)-1; // make sure it is never used
m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, copy it to stackguard
m->seh = &seh;
runtime·asminit();
@@ -651,7 +651,7 @@ runtime·newextram(void)
// the goroutine stack ends.
mp = runtime·allocm(nil);
gp = runtime·malg(4096);
- gp->sched.pc = (void*)runtime·goexit;
+ gp->sched.pc = (uintptr)runtime·goexit;
gp->sched.sp = gp->stackbase;
gp->sched.g = gp;
gp->status = Gsyscall;
@@ -997,7 +997,7 @@ execute(G *gp)
if(m->profilehz != hz)
runtime·resetcpuprofiler(hz);
- if(gp->sched.pc == (byte*)runtime·goexit) // kickoff
+ if(gp->sched.pc == (uintptr)runtime·goexit) // kickoff
runtime·gogocallfn(&gp->sched, gp->fnstart);
runtime·gogo(&gp->sched, 0);
}
@@ -1281,7 +1281,7 @@ void
// Leave SP around for gc and traceback.
g->sched.sp = (uintptr)runtime·getcallersp(&dummy);
- g->sched.pc = runtime·getcallerpc(&dummy);
+ g->sched.pc = (uintptr)runtime·getcallerpc(&dummy);
g->sched.g = g;
g->gcsp = g->sched.sp;
g->gcpc = g->sched.pc;
@@ -1330,8 +1330,8 @@ void
runtime·setprof(false);
// Leave SP around for gc and traceback.
- g->sched.sp = (uintptr)runtime·getcallersp(&dummy);
- g->sched.pc = runtime·getcallerpc(&dummy);
+ g->sched.sp = runtime·getcallersp(&dummy);
+ g->sched.pc = (uintptr)runtime·getcallerpc(&dummy);
g->sched.g = g;
g->gcsp = g->sched.sp;
g->gcpc = g->sched.pc;
@@ -1548,14 +1548,14 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
}
newg->sched.sp = (uintptr)sp;
- newg->sched.pc = (byte*)runtime·goexit;
+ newg->sched.pc = (uintptr)runtime·goexit;
newg->sched.g = newg;
newg->fnstart = fn;
newg->gopc = (uintptr)callerpc;
newg->status = Grunnable;
newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
if(raceenabled)
- newg->racectx = runtime·racegostart(callerpc);
+ newg->racectx = runtime·racegostart((void*)callerpc);
runqput(m->p, newg);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
@@ -1802,7 +1802,7 @@ runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
runtime·unlock(&prof);
return;
}
- n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil);
+ n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil);
if(n > 0)
prof.fn(prof.pcbuf, n);
runtime·unlock(&prof);