aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/proc.go
diff options
context:
space:
mode:
author	Michael Anthony Knyszek <mknyszek@google.com>	2024-04-18 20:54:55 +0000
committer	Michael Knyszek <mknyszek@google.com>	2024-04-19 17:25:00 +0000
commit2b82a4f488179a62a69dd318ea62f0624641ae63 (patch)
treee86da95881d3bd61e9e6f9065b4ca658f4974487 /src/runtime/proc.go
parentdcb5de5cac5baee703b1fe215f28f22aebc93437 (diff)
downloadgo-2b82a4f488179a62a69dd318ea62f0624641ae63.tar.xz
runtime: track frame pointer while in syscall
Currently the runtime only tracks the PC and SP upon entering a syscall, but not the FP (BP). This is mainly for historical reasons, and because the tracer (which uses the frame pointer unwinder) does not need it. Until it did, of course, in CL 567076, where the tracer tries to take a stack trace of a goroutine that's in a syscall from afar. It tries to use gp.sched.bp and lots of things go wrong. It *really* should be using the equivalent of gp.syscallbp, which doesn't exist before this CL. This change introduces gp.syscallbp and tracks it. It also introduces getcallerfp which is nice for simplifying some code. Because we now have gp.syscallbp, we can also delete the frame skip count computation in traceLocker.GoSysCall, because it's now the same regardless of whether frame pointer unwinding is used. Fixes #66889. Change-Id: Ib6d761c9566055e0a037134138cb0f81be73ecf7 Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-nocgo Reviewed-on: https://go-review.googlesource.com/c/go/+/580255 Auto-Submit: Michael Knyszek <mknyszek@google.com> Reviewed-by: Michael Pratt <mpratt@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Reviewed-by: Cherry Mui <cherryyz@google.com>
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--	src/runtime/proc.go	30
1 file changed, 19 insertions, 11 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 8f5787dbbb..cb5a80455d 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -4237,7 +4237,7 @@ func gdestroy(gp *g) {
//
//go:nosplit
//go:nowritebarrierrec
-func save(pc, sp uintptr) {
+func save(pc, sp, bp uintptr) {
gp := getg()
if gp == gp.m.g0 || gp == gp.m.gsignal {
@@ -4253,6 +4253,7 @@ func save(pc, sp uintptr) {
gp.sched.sp = sp
gp.sched.lr = 0
gp.sched.ret = 0
+ gp.sched.bp = bp
// We need to ensure ctxt is zero, but can't have a write
// barrier here. However, it should always already be zero.
// Assert that.
@@ -4285,7 +4286,7 @@ func save(pc, sp uintptr) {
// entry point for syscalls, which obtains the SP and PC from the caller.
//
//go:nosplit
-func reentersyscall(pc, sp uintptr) {
+func reentersyscall(pc, sp, bp uintptr) {
trace := traceAcquire()
gp := getg()
@@ -4301,14 +4302,15 @@ func reentersyscall(pc, sp uintptr) {
gp.throwsplit = true
// Leave SP around for GC and traceback.
- save(pc, sp)
+ save(pc, sp, bp)
gp.syscallsp = sp
gp.syscallpc = pc
+ gp.syscallbp = bp
casgstatus(gp, _Grunning, _Gsyscall)
if staticLockRanking {
// When doing static lock ranking casgstatus can call
// systemstack which clobbers g.sched.
- save(pc, sp)
+ save(pc, sp, bp)
}
if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(func() {
@@ -4325,18 +4327,18 @@ func reentersyscall(pc, sp uintptr) {
// systemstack itself clobbers g.sched.{pc,sp} and we might
// need them later when the G is genuinely blocked in a
// syscall
- save(pc, sp)
+ save(pc, sp, bp)
}
if sched.sysmonwait.Load() {
systemstack(entersyscall_sysmon)
- save(pc, sp)
+ save(pc, sp, bp)
}
if gp.m.p.ptr().runSafePointFn != 0 {
// runSafePointFn may stack split if run on this stack
systemstack(runSafePointFn)
- save(pc, sp)
+ save(pc, sp, bp)
}
gp.m.syscalltick = gp.m.p.ptr().syscalltick
@@ -4347,7 +4349,7 @@ func reentersyscall(pc, sp uintptr) {
atomic.Store(&pp.status, _Psyscall)
if sched.gcwaiting.Load() {
systemstack(entersyscall_gcwait)
- save(pc, sp)
+ save(pc, sp, bp)
}
gp.m.locks--
@@ -4360,7 +4362,12 @@ func reentersyscall(pc, sp uintptr) {
//go:nosplit
//go:linkname entersyscall
func entersyscall() {
- reentersyscall(getcallerpc(), getcallersp())
+ // N.B. getcallerfp cannot be written directly as argument in the call
+ // to reentersyscall because it forces spilling the other arguments to
+ // the stack. This results in exceeding the nosplit stack requirements
+ // on some platforms.
+ fp := getcallerfp()
+ reentersyscall(getcallerpc(), getcallersp(), fp)
}
func entersyscall_sysmon() {
@@ -4418,7 +4425,8 @@ func entersyscallblock() {
// Leave SP around for GC and traceback.
pc := getcallerpc()
sp := getcallersp()
- save(pc, sp)
+ bp := getcallerfp()
+ save(pc, sp, bp)
gp.syscallsp = gp.sched.sp
gp.syscallpc = gp.sched.pc
if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
@@ -4441,7 +4449,7 @@ func entersyscallblock() {
systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call.
- save(getcallerpc(), getcallersp())
+ save(getcallerpc(), getcallersp(), getcallerfp())
gp.m.locks--
}