aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/proc.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--src/runtime/proc.go26
1 files changed, 13 insertions, 13 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 16237e98ec..4aa6b3600e 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -126,7 +126,7 @@ func main() {
})
// Lock the main goroutine onto this, the main OS thread,
- // during initialization. Most programs won't care, but a few
+ // during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
@@ -237,7 +237,7 @@ func forcegchelper() {
//go:nosplit
-// Gosched yields the processor, allowing other goroutines to run. It does not
+// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
mcall(gosched_m)
@@ -424,7 +424,7 @@ func schedinit() {
sched.maxmcount = 10000
- // Cache the framepointer experiment. This affects stack unwinding.
+ // Cache the framepointer experiment. This affects stack unwinding.
framepointer_enabled = haveexperiment("framepointer")
tracebackinit()
@@ -454,7 +454,7 @@ func schedinit() {
}
if buildVersion == "" {
- // Condition should never trigger. This code just serves
+ // Condition should never trigger. This code just serves
// to ensure runtime·buildVersion is kept in the resulting binary.
buildVersion = "unknown"
}
@@ -1014,7 +1014,7 @@ func startTheWorldWithSema() {
// in the hope that it will be available next time.
// It would have been even better to start it before the collection,
// but doing so requires allocating memory, so it's tricky to
- // coordinate. This lazy approach works out in practice:
+ // coordinate. This lazy approach works out in practice:
// we don't mind if the first couple gc rounds don't have quite
// the maximum number of procs.
newm(mhelpgc, nil)
@@ -1470,7 +1470,7 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
-// Create a new m. It will start off with a call to fn, or else the scheduler.
+// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
@@ -2641,7 +2641,7 @@ func newproc(siz int32, fn *funcval) {
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
-// address of the go statement that created this. The new g is put
+// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
_g_ := getg()
@@ -2792,7 +2792,7 @@ retry:
_p_.gfree = gp.schedlink.ptr()
_p_.gfreecnt--
if gp.stack.lo == 0 {
- // Stack was deallocated in gfput. Allocate a new one.
+ // Stack was deallocated in gfput. Allocate a new one.
systemstack(func() {
gp.stack, gp.stkbar = stackalloc(_FixedStack)
})
@@ -3128,7 +3128,7 @@ func setcpuprofilerate_m(hz int32) {
_g_.m.locks--
}
-// Change number of processors. The world is stopped, sched is locked.
+// Change number of processors. The world is stopped, sched is locked.
// gcworkbufs are not being modified by either the GC or
// the write barrier code.
// Returns list of Ps with local work, they need to be scheduled by the caller.
@@ -3334,7 +3334,7 @@ func incidlelocked(v int32) {
// The check is based on number of running M's, if 0 -> deadlock.
func checkdead() {
// For -buildmode=c-shared or -buildmode=c-archive it's OK if
- // there are no running goroutines. The calling program is
+ // there are no running goroutines. The calling program is
// assumed to be running.
if islibrary || isarchive {
return
@@ -3579,7 +3579,7 @@ func retake(now int64) uint32 {
}
// Tell all goroutines that they have been preempted and they should stop.
-// This function is purely best-effort. It can fail to inform a goroutine if a
+// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.
@@ -3598,8 +3598,8 @@ func preemptall() bool {
}
// Tell the goroutine running on processor P to stop.
-// This function is purely best-effort. It can incorrectly fail to inform the
-// goroutine. It can send inform the wrong goroutine. Even if it informs the
+// This function is purely best-effort. It can incorrectly fail to inform the
+// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.