aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
author    Michael Pratt <mpratt@google.com>    2024-09-16 15:58:36 -0400
committer Michael Pratt <mpratt@google.com>   2024-09-17 17:01:20 +0000
commit    4f881115d4067bda8a236aabcae8c41cdd13b4d0 (patch)
tree      2249bfaec1950819e5e22184e61e60dcd076ae40 /src/runtime
parent    41ca2637d4df8b8edf63436c6caab56821d2af38 (diff)
download  go-4f881115d4067bda8a236aabcae8c41cdd13b4d0.tar.xz
runtime: move getcallersp to internal/runtime/sys
Moving these intrinsics to a base package enables other internal/runtime
packages to use them.

For #54766.

Change-Id: I45a530422207dd94b5ad4eee51216c9410a84040
Reviewed-on: https://go-review.googlesource.com/c/go/+/613261
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/asan.go                 |  8
-rw-r--r--  src/runtime/debugcall.go            |  2
-rw-r--r--  src/runtime/export_test.go          |  4
-rw-r--r--  src/runtime/export_windows_test.go  |  2
-rw-r--r--  src/runtime/lock_js.go              |  9
-rw-r--r--  src/runtime/mprof.go                |  8
-rw-r--r--  src/runtime/os2_aix.go              | 14
-rw-r--r--  src/runtime/os_solaris.go           | 14
-rw-r--r--  src/runtime/os_windows.go           |  2
-rw-r--r--  src/runtime/panic.go                | 26
-rw-r--r--  src/runtime/proc.go                 | 16
-rw-r--r--  src/runtime/signal_unix.go          |  4
-rw-r--r--  src/runtime/stubs.go                | 26
-rw-r--r--  src/runtime/stubs_wasm.go           |  4
-rw-r--r--  src/runtime/sys_libc.go             |  2
-rw-r--r--  src/runtime/traceback.go            |  6
16 files changed, 62 insertions, 85 deletions
diff --git a/src/runtime/asan.go b/src/runtime/asan.go
index 76b958efbb..6fb1d00c3c 100644
--- a/src/runtime/asan.go
+++ b/src/runtime/asan.go
@@ -13,13 +13,13 @@ import (
// Public address sanitizer API.
func ASanRead(addr unsafe.Pointer, len int) {
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
doasanread(addr, uintptr(len), sp, pc)
}
func ASanWrite(addr unsafe.Pointer, len int) {
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
doasanwrite(addr, uintptr(len), sp, pc)
}
@@ -33,7 +33,7 @@ const asanenabled = true
//go:linkname asanread
//go:nosplit
func asanread(addr unsafe.Pointer, sz uintptr) {
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
doasanread(addr, sz, sp, pc)
}
@@ -41,7 +41,7 @@ func asanread(addr unsafe.Pointer, sz uintptr) {
//go:linkname asanwrite
//go:nosplit
func asanwrite(addr unsafe.Pointer, sz uintptr) {
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
doasanwrite(addr, sz, sp, pc)
}
diff --git a/src/runtime/debugcall.go b/src/runtime/debugcall.go
index 8d0174e6ae..e6554475c0 100644
--- a/src/runtime/debugcall.go
+++ b/src/runtime/debugcall.go
@@ -35,7 +35,7 @@ func debugCallCheck(pc uintptr) string {
if getg() != getg().m.curg {
return debugCallSystemStack
}
- if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
+ if sp := sys.GetCallerSP(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
// Fast syscalls (nanotime) and racecall switch to the
// g0 stack without switching g. We can't safely make
// a call in this state. (We can't even safely
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index e5f571814b..3bde1aea29 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -504,7 +504,7 @@ func LockOSCounts() (external, internal uint32) {
//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
if i == 0 {
- pc, sp := sys.GetCallerPC(), getcallersp()
+ pc, sp := sys.GetCallerPC(), sys.GetCallerSP()
var u unwinder
u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
return tracebackPCs(&u, 0, stk)
@@ -587,7 +587,7 @@ func unexportedPanicForTesting(b []byte, i int) byte {
func G0StackOverflow() {
systemstack(func() {
g0 := getg()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
// The stack bounds for g0 stack is not always precise.
// Use an artificially small stack, to trigger a stack overflow
// without actually run out of the system stack (which may seg fault).
diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go
index 6e98fe9789..13d30d4bc4 100644
--- a/src/runtime/export_windows_test.go
+++ b/src/runtime/export_windows_test.go
@@ -35,7 +35,7 @@ func (c ContextStub) GetPC() uintptr {
func NewContextStub() *ContextStub {
var ctx context
ctx.set_ip(sys.GetCallerPC())
- ctx.set_sp(getcallersp())
+ ctx.set_sp(sys.GetCallerSP())
ctx.set_fp(getcallerfp())
return &ContextStub{ctx}
}
diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go
index f19e20a4c3..e70a881895 100644
--- a/src/runtime/lock_js.go
+++ b/src/runtime/lock_js.go
@@ -6,7 +6,10 @@
package runtime
-import _ "unsafe" // for go:linkname
+import (
+ "internal/runtime/sys"
+ _ "unsafe" // for go:linkname
+)
// js/wasm has no support for threads yet. There is no preemption.
@@ -244,7 +247,7 @@ var idleStart int64
func handleAsyncEvent() {
idleStart = nanotime()
- pause(getcallersp() - 16)
+ pause(sys.GetCallerSP() - 16)
}
// clearIdleTimeout clears our record of the timeout started by beforeIdle.
@@ -291,7 +294,7 @@ func handleEvent() {
// return execution to JavaScript
idleStart = nanotime()
- pause(getcallersp() - 16)
+ pause(sys.GetCallerSP() - 16)
}
// eventHandler retrieves and executes handlers for pending JavaScript events.
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 6b6e896e9d..d84f8d26ea 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -810,7 +810,7 @@ func (prof *mLockProfile) captureStack() {
var nstk int
gp := getg()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
systemstack(func() {
var u unwinder
@@ -1401,7 +1401,7 @@ func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels
}
// Save current goroutine.
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
systemstack(func() {
saveg(pc, sp, ourg, &p[0], pcbuf)
@@ -1597,7 +1597,7 @@ func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsa
r, lbl := p, labels
// Save current goroutine.
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
systemstack(func() {
saveg(pc, sp, gp, &r[0], pcbuf)
@@ -1699,7 +1699,7 @@ func Stack(buf []byte, all bool) int {
n := 0
if len(buf) > 0 {
gp := getg()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
systemstack(func() {
g0 := getg()
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 39fa9fbf73..51758bd304 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -186,7 +186,7 @@ func syscall0(fn *libFunc) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -217,7 +217,7 @@ func syscall1(fn *libFunc, a0 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -249,7 +249,7 @@ func syscall2(fn *libFunc, a0, a1 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -281,7 +281,7 @@ func syscall3(fn *libFunc, a0, a1, a2 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -313,7 +313,7 @@ func syscall4(fn *libFunc, a0, a1, a2, a3 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -345,7 +345,7 @@ func syscall5(fn *libFunc, a0, a1, a2, a3, a4 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
@@ -377,7 +377,7 @@ func syscall6(fn *libFunc, a0, a1, a2, a3, a4, a5 uintptr) (r, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
resetLibcall = false // See comment in sys_darwin.go:libcCall
}
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index 8cb9869925..5f6163f131 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -48,7 +48,7 @@ func sysvicall0(fn *libcFunc) uintptr {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil // See comment in sys_darwin.go:libcCall
}
@@ -86,7 +86,7 @@ func sysvicall1Err(fn *libcFunc, a1 uintptr) (r1, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
@@ -126,7 +126,7 @@ func sysvicall2Err(fn *libcFunc, a1, a2 uintptr) (uintptr, uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
@@ -165,7 +165,7 @@ func sysvicall3Err(fn *libcFunc, a1, a2, a3 uintptr) (r1, err uintptr) {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
@@ -195,7 +195,7 @@ func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
@@ -225,7 +225,7 @@ func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
@@ -255,7 +255,7 @@ func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
mp = nil
}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 1961d68ad8..7183e79f7d 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -965,7 +965,7 @@ func stdcall(fn stdFunction) uintptr {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
resetLibcall = true // See comment in sys_darwin.go:libcCall
}
asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.libcall))
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index e74a7feb05..5b62e019d9 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -281,10 +281,10 @@ func deferproc(fn func()) {
gp._defer = d
d.fn = fn
d.pc = sys.GetCallerPC()
- // We must not be preempted between calling getcallersp and
- // storing it to d.sp because getcallersp's result is a
+ // We must not be preempted between calling GetCallerSP and
+ // storing it to d.sp because GetCallerSP's result is a
// uintptr stack pointer.
- d.sp = getcallersp()
+ d.sp = sys.GetCallerSP()
// deferproc returns 0 normally.
// a deferred func that stops a panic
@@ -395,10 +395,10 @@ func deferrangefunc() any {
d.link = gp._defer
gp._defer = d
d.pc = sys.GetCallerPC()
- // We must not be preempted between calling getcallersp and
- // storing it to d.sp because getcallersp's result is a
+ // We must not be preempted between calling GetCallerSP and
+ // storing it to d.sp because GetCallerSP's result is a
// uintptr stack pointer.
- d.sp = getcallersp()
+ d.sp = sys.GetCallerSP()
d.rangefunc = true
d.head = new(atomic.Pointer[_defer])
@@ -484,7 +484,7 @@ func deferprocStack(d *_defer) {
// are initialized here.
d.heap = false
d.rangefunc = false
- d.sp = getcallersp()
+ d.sp = sys.GetCallerSP()
d.pc = sys.GetCallerPC()
// The lines below implement:
// d.panic = nil
@@ -596,7 +596,7 @@ func deferreturn() {
var p _panic
p.deferreturn = true
- p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
for {
fn, ok := p.nextDefer()
if !ok {
@@ -622,7 +622,7 @@ func Goexit() {
var p _panic
p.goexit = true
- p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
for {
fn, ok := p.nextDefer()
if !ok {
@@ -778,7 +778,7 @@ func gopanic(e any) {
runningPanicDefers.Add(1)
- p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(sys.GetCallerSP()))
for {
fn, ok := p.nextDefer()
if !ok {
@@ -818,7 +818,7 @@ func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
// can restart its defer processing loop if a recovered panic tries
// to jump past it.
p.startPC = sys.GetCallerPC()
- p.startSP = unsafe.Pointer(getcallersp())
+ p.startSP = unsafe.Pointer(sys.GetCallerSP())
if p.deferreturn {
p.sp = sp
@@ -1228,7 +1228,7 @@ func recovery(gp *g) {
//go:nosplit
func fatalthrow(t throwType) {
pc := sys.GetCallerPC()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
gp := getg()
if gp.m.throwing == throwTypeNone {
@@ -1264,7 +1264,7 @@ func fatalthrow(t throwType) {
//go:nosplit
func fatalpanic(msgs *_panic) {
pc := sys.GetCallerPC()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
gp := getg()
var docrash bool
// Switch to the system stack to avoid any stack growth, which
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 8f5919bbf6..7ff339ea46 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -274,7 +274,7 @@ func main() {
// Using the caller's SP unwinds this frame and backs to
// goexit. The -16 is: 8 for goexit's (fake) return PC,
// and pause's epilogue pops 8.
- pause(getcallersp() - 16) // should not return
+ pause(sys.GetCallerSP() - 16) // should not return
panic("unreachable")
}
return
@@ -1811,7 +1811,7 @@ func mstart0() {
mexit(osStack)
}
-// The go:noinline is to guarantee the sys.GetCallerPC/getcallersp below are safe,
+// The go:noinline is to guarantee the sys.GetCallerPC/sys.GetCallerSP below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
//
//go:noinline
@@ -1830,7 +1830,7 @@ func mstart1() {
// and let mstart0 exit the thread.
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.sched.pc = sys.GetCallerPC()
- gp.sched.sp = getcallersp()
+ gp.sched.sp = sys.GetCallerSP()
asminit()
minit()
@@ -2329,7 +2329,7 @@ func needm(signal bool) {
// Install g (= m->g0) and set the stack bounds
// to match the current stack.
setg(mp.g0)
- sp := getcallersp()
+ sp := sys.GetCallerSP()
callbackUpdateSystemStack(mp, sp, signal)
// Should mark we are already in Go now.
@@ -4496,7 +4496,7 @@ func entersyscall() {
// the stack. This results in exceeding the nosplit stack requirements
// on some platforms.
fp := getcallerfp()
- reentersyscall(sys.GetCallerPC(), getcallersp(), fp)
+ reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
}
func entersyscall_sysmon() {
@@ -4562,7 +4562,7 @@ func entersyscallblock() {
// Leave SP around for GC and traceback.
pc := sys.GetCallerPC()
- sp := getcallersp()
+ sp := sys.GetCallerSP()
bp := getcallerfp()
save(pc, sp, bp)
gp.syscallsp = gp.sched.sp
@@ -4594,7 +4594,7 @@ func entersyscallblock() {
systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call.
- save(sys.GetCallerPC(), getcallersp(), getcallerfp())
+ save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
gp.m.locks--
}
@@ -4632,7 +4632,7 @@ func exitsyscall() {
gp := getg()
gp.m.locks++ // see comment in entersyscall
- if getcallersp() > gp.syscallsp {
+ if sys.GetCallerSP() > gp.syscallsp {
throw("exitsyscall: syscall frame is no longer valid")
}
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index a42972bb35..a6373093b5 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -405,7 +405,7 @@ func sigFetchG(c *sigctxt) *g {
// bottom of the signal stack. Fetch from there.
// TODO: in efence mode, stack is sysAlloc'd, so this wouldn't
// work.
- sp := getcallersp()
+ sp := sys.GetCallerSP()
s := spanOf(sp)
if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
gp := *(**g)(unsafe.Pointer(s.base()))
@@ -479,7 +479,7 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
var gsignalStack gsignalStack
setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
if setStack {
- gp.m.gsignal.stktopsp = getcallersp()
+ gp.m.gsignal.stktopsp = sys.GetCallerSP()
}
if gp.stackguard0 == stackFork {
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index ccb2e7f931..84f478db07 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -307,32 +307,6 @@ func goexit(neverCallThisFunction)
// data dependency ordering.
func publicationBarrier()
-// getcallerpc returns the program counter (PC) of its caller's caller.
-// getcallersp returns the stack pointer (SP) of its caller's caller.
-// Both are implemented as intrinsics on every platform.
-//
-// For example:
-//
-// func f(arg1, arg2, arg3 int) {
-// pc := getcallerpc()
-// sp := getcallersp()
-// }
-//
-// These two lines find the PC and SP immediately following
-// the call to f (where f will return).
-//
-// The call to getcallerpc and getcallersp must be done in the
-// frame being asked about.
-//
-// The result of getcallersp is correct at the time of the return,
-// but it may be invalidated by any subsequent call to a function
-// that might relocate the stack in order to grow or shrink it.
-// A general rule is that the result of getcallersp should be used
-// immediately and can only be passed to nosplit functions.
-
-
-func getcallersp() uintptr
-
// getclosureptr returns the pointer to the current closure.
// getclosureptr can only be used in an assignment statement
// at the entry of a function. Moreover, go:nosplit directive
diff --git a/src/runtime/stubs_wasm.go b/src/runtime/stubs_wasm.go
index 75078b53eb..fafc923b76 100644
--- a/src/runtime/stubs_wasm.go
+++ b/src/runtime/stubs_wasm.go
@@ -11,6 +11,6 @@ package runtime
// returning to the host, the SP is newsp+8.
// If we want to set the SP such that when it calls back into Go, the
// Go function appears to be called from pause's caller's caller, then
-// call pause with newsp = getcallersp()-16 (another 8 is the return
-// PC pushed to the stack).
+// call pause with newsp = internal/runtime/sys.GetCallerSP()-16 (another 8 is
+// the return PC pushed to the stack).
func pause(newsp uintptr)
diff --git a/src/runtime/sys_libc.go b/src/runtime/sys_libc.go
index 556f388662..72d8991559 100644
--- a/src/runtime/sys_libc.go
+++ b/src/runtime/sys_libc.go
@@ -29,7 +29,7 @@ func libcCall(fn, arg unsafe.Pointer) int32 {
mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
+ mp.libcallsp = sys.GetCallerSP()
} else {
// Make sure we don't reset libcallsp. This makes
// libcCall reentrant; We remember the g/pc/sp for the
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index ee6a7e7acc..95a57bd2b7 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -143,7 +143,7 @@ func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) {
// on another stack. That could confuse callers quite a bit.
// Instead, we require that initAt and any other function that
// accepts an sp for the current goroutine (typically obtained by
- // calling getcallersp) must not run on that goroutine's stack but
+ // calling GetCallerSP) must not run on that goroutine's stack but
// instead on the g0 stack.
throw("cannot trace user goroutine on its own stack")
}
@@ -804,7 +804,7 @@ func traceback(pc, sp, lr uintptr, gp *g) {
}
// tracebacktrap is like traceback but expects that the PC and SP were obtained
-// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/getcallersp.
+// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/GetCallerSP.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
@@ -1090,7 +1090,7 @@ func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
//
//go:linkname callers
func callers(skip int, pcbuf []uintptr) int {
- sp := getcallersp()
+ sp := sys.GetCallerSP()
pc := sys.GetCallerPC()
gp := getg()
var n int