about summary refs log tree commit diff
path: root/src/runtime/proc.go
diff options
context:
space:
mode:
authorRuss Cox <rsc@golang.org>2022-01-30 20:13:43 -0500
committerRuss Cox <rsc@golang.org>2022-04-05 17:54:15 +0000
commit9839668b5619f45e293dd40339bf0ac614ea6bee (patch)
treea836ea07d0a9ec5e32638d060cdeb6b4ded636dc /src/runtime/proc.go
parent81431c7aa7c5d782e72dec342442ea7664ef1783 (diff)
downloadgo-9839668b5619f45e293dd40339bf0ac614ea6bee.tar.xz
all: separate doc comment from //go: directives
A future change to gofmt will rewrite

	// Doc comment.
	//go:foo

to

	// Doc comment.
	//
	//go:foo

Apply that change preemptively to all comments (not necessarily just doc comments).

For #51082.

Change-Id: Iffe0285418d1e79d34526af3520b415a12203ca9
Reviewed-on: https://go-review.googlesource.com/c/go/+/384260
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--  src/runtime/proc.go  30
1 file changed, 30 insertions, 0 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index f9f82f3867..ae4440786e 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -277,6 +277,7 @@ func main() {
}
// os_beforeExit is called from os.Exit(0).
+//
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
if raceenabled {
@@ -319,6 +320,7 @@ func Gosched() {
// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
+//
//go:nosplit
func goschedguarded() {
mcall(goschedguarded_m)
@@ -894,6 +896,7 @@ func freezetheworld() {
// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
+//
//go:nosplit
func readgstatus(gp *g) uint32 {
return atomic.Load(&gp.atomicstatus)
@@ -955,6 +958,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
+//
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
@@ -1028,6 +1032,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
+//
//go:nosplit
func casgcopystack(gp *g) uint32 {
for {
@@ -1387,6 +1392,7 @@ func mstart0() {
// The go:noinline is to guarantee the getcallerpc/getcallersp below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
+//
//go:noinline
func mstart1() {
_g_ := getg()
@@ -1443,6 +1449,7 @@ func mstartm0() {
}
// mPark causes a thread to park itself, returning once woken.
+//
//go:nosplit
func mPark() {
gp := getg()
@@ -1795,6 +1802,7 @@ func allocm(_p_ *p, fn func(), id int64) *m {
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
+//
//go:nosplit
func needm() {
if (iscgo || GOOS == "windows") && !cgoHasExtraM {
@@ -2000,6 +2008,7 @@ var extraMWaiters uint32
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
+//
//go:nosplit
func lockextra(nilokay bool) *m {
const locked = 1
@@ -2073,6 +2082,7 @@ var newmHandoff struct {
// May run with m.p==nil, so write barriers are not allowed.
//
// id is optional pre-allocated m ID. Omit by passing -1.
+//
//go:nowritebarrierrec
func newm(fn func(), _p_ *p, id int64) {
// allocm adds a new M to allm, but they do not start until created by
@@ -2245,6 +2255,7 @@ func mspinning() {
// comment on acquirem below.
//
// Must not have write barriers because this may be called without a P.
+//
//go:nowritebarrierrec
func startm(_p_ *p, spinning bool) {
// Disable preemption.
@@ -2329,6 +2340,7 @@ func startm(_p_ *p, spinning bool) {
// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func handoffp(_p_ *p) {
// handoffp must start an M in any situation where
@@ -2432,6 +2444,7 @@ func stoplockedm() {
// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func startlockedm(gp *g) {
_g_ := getg()
@@ -3248,6 +3261,7 @@ func dropg() {
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
+//
//go:yeswritebarrierrec
func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
// If it's not yet time for the first timer, or the first adjusted
@@ -3680,6 +3694,7 @@ func entersyscall_gcwait() {
}
// The same as entersyscall(), but with a hint that the syscall is blocking.
+//
//go:nosplit
func entersyscallblock() {
_g_ := getg()
@@ -3939,6 +3954,7 @@ func exitsyscall0(gp *g) {
}
// Called from syscall package before fork.
+//
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
@@ -3959,6 +3975,7 @@ func syscall_runtime_BeforeFork() {
}
// Called from syscall package after fork in parent.
+//
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
@@ -4009,6 +4026,7 @@ func syscall_runtime_AfterForkInChild() {
var pendingPreemptSignals uint32
// Called from syscall package before Exec.
+//
//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
func syscall_runtime_BeforeExec() {
// Prevent thread creation during exec.
@@ -4024,6 +4042,7 @@ func syscall_runtime_BeforeExec() {
}
// Called from syscall package after Exec.
+//
//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
func syscall_runtime_AfterExec() {
execLock.unlock()
@@ -4305,6 +4324,7 @@ func Breakpoint() {
// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
+//
//go:nosplit
func dolockOSThread() {
if GOARCH == "wasm" {
@@ -4356,6 +4376,7 @@ func lockOSThread() {
// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be in different in this function than in the caller.
+//
//go:nosplit
func dounlockOSThread() {
if GOARCH == "wasm" {
@@ -4438,6 +4459,7 @@ func _VDSO() { _VDSO() }
// Called if we receive a SIGPROF signal.
// Called by the signal handler, may run during STW.
+//
//go:nowritebarrierrec
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
if prof.hz == 0 {
@@ -5446,6 +5468,7 @@ func schedEnabled(gp *g) bool {
// Put mp on midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func mput(mp *m) {
assertLockHeld(&sched.lock)
@@ -5459,6 +5482,7 @@ func mput(mp *m) {
// Try to get an m from midle list.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func mget() *m {
assertLockHeld(&sched.lock)
@@ -5474,6 +5498,7 @@ func mget() *m {
// Put gp on the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func globrunqput(gp *g) {
assertLockHeld(&sched.lock)
@@ -5485,6 +5510,7 @@ func globrunqput(gp *g) {
// Put gp at the head of the global runnable queue.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func globrunqputhead(gp *g) {
assertLockHeld(&sched.lock)
@@ -5497,6 +5523,7 @@ func globrunqputhead(gp *g) {
// This clears *batch.
// sched.lock must be held.
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func globrunqputbatch(batch *gQueue, n int32) {
assertLockHeld(&sched.lock)
@@ -5609,6 +5636,7 @@ func updateTimerPMask(pp *p) {
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func pidleput(_p_ *p) {
assertLockHeld(&sched.lock)
@@ -5628,6 +5656,7 @@ func pidleput(_p_ *p) {
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
+//
//go:nowritebarrierrec
func pidleget() *p {
assertLockHeld(&sched.lock)
@@ -6083,6 +6112,7 @@ func sync_atomic_runtime_procUnpin() {
}
// Active spinning for sync.Mutex.
+//
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {