Diffstat (limited to 'src/runtime/proc.go')
-rw-r--r--  src/runtime/proc.go  123
1 file changed, 108 insertions, 15 deletions
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 0319de5fde..73a789c189 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -490,8 +490,29 @@ func lockedOSThread() bool {
}
var (
- allgs []*g
+ // allgs contains all Gs ever created (including dead Gs), and thus
+ // never shrinks.
+ //
+ // Access via the slice is protected by allglock or stop-the-world.
+ // Readers that cannot take the lock may (carefully!) use the atomic
+ // variables below.
allglock mutex
+ allgs []*g
+
+ // allglen and allgptr are atomic variables that contain len(allgs) and
+ // &allg[0] respectively. Proper ordering depends on totally-ordered
+ // loads and stores. Writes are protected by allglock.
+ //
+ // allgptr is updated before allglen. Readers should load allglen
+ // before allgptr to ensure that the observed length never exceeds
+ // the length of the array allgptr points to. New Gs appended during
+ // the race can be missed. For a consistent view of all Gs, allglock
+ // must be held.
+ //
+ // allgptr copies should always be stored as a concrete type or
+ // unsafe.Pointer, not uintptr, so that GC can still reach the array
+ // even if the copy points to a stale one.
+ allglen uintptr
+ allgptr **g
)
func allgadd(gp *g) {
@@ -501,10 +522,25 @@ func allgadd(gp *g) {
lock(&allglock)
allgs = append(allgs, gp)
- allglen = uintptr(len(allgs))
+ if &allgs[0] != allgptr {
+ atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
+ }
+ atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
unlock(&allglock)
}
+// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
+func atomicAllG() (**g, uintptr) {
+ length := atomic.Loaduintptr(&allglen)
+ ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
+ return ptr, length
+}
+
+// atomicAllGIndex returns ptr[i], where ptr is the allgptr value returned by atomicAllG.
+func atomicAllGIndex(ptr **g, i uintptr) *g {
+ return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+}
+
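Taken together, allgadd and these two helpers form a single-writer, lock-free-reader protocol: allgadd stores the new array pointer before the new length, and atomicAllG loads the length before the pointer, so a reader's snapshot never claims more entries than its array actually holds. A minimal sketch of a reader that cannot take allglock (the forEachGSketch name and callback are illustrative, not part of this change):

	func forEachGSketch(fn func(gp *g)) {
		// Snapshot the length/pointer pair once. Gs appended after
		// this point may be missed, as the comment on allgs warns;
		// hold allglock instead if a consistent view is required.
		ptr, n := atomicAllG()
		for i := uintptr(0); i < n; i++ {
			fn(atomicAllGIndex(ptr, i))
		}
		// ptr stays typed as **g (never uintptr) so the GC can still
		// reach the array even if allgs has since been reallocated.
	}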
const (
// Number of goroutine ids to grab from sched.goidgen to the local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
@@ -1170,6 +1206,33 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
return startTime
}
+// usesLibcall indicates whether this runtime performs system calls
+// via libcall.
+func usesLibcall() bool {
+ switch GOOS {
+ case "aix", "darwin", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ return GOARCH == "amd64" || GOARCH == "arm64"
+ }
+ return false
+}
+
+// mStackIsSystemAllocated indicates whether this runtime starts on a
+// system-allocated stack.
+func mStackIsSystemAllocated() bool {
+ switch GOOS {
+ case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ switch GOARCH {
+ case "amd64", "arm64":
+ return true
+ }
+ }
+ return false
+}
+
// mstart is the entry-point for new Ms.
//
// This must not split the stack because we may not even have stack
@@ -1188,6 +1251,11 @@ func mstart() {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
// minit may update the stack bounds.
+ //
+ // Note: these bounds may not be very accurate.
+ // We set hi to &size, but there are things above
+ // it. The 1024 is supposed to compensate for this,
+ // but is somewhat arbitrary.
size := _g_.stack.hi
if size == 0 {
size = 8192 * sys.StackGuardMultiplier
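For context, mstart's bounds computation continues just past this hunk, roughly as follows (reproduced from the surrounding function, not part of this change):

	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
	_g_.stack.lo = _g_.stack.hi - size + 1024

Because &size sits below the true top of the system stack, hi is an underestimate; the +1024 pulls lo up so that the claimed [lo, hi] range stays inside the real stack, which is the arbitrary compensation the new comment describes.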
@@ -1204,8 +1272,7 @@ func mstart() {
mstart1()
// Exit this thread.
- switch GOOS {
- case "windows", "solaris", "illumos", "plan9", "darwin", "ios", "aix":
+ if mStackIsSystemAllocated() {
// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
// the stack, but put it in _g_.stack before mstart,
// so the logic above hasn't set osStack yet.
@@ -1313,7 +1380,7 @@ func mexit(osStack bool) {
throw("locked m0 woke up")
}
- sigblock()
+ sigblock(true)
unminit()
// Free the gsignal stack.
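Here and in the needm, dropm, and beforefork hunks below, sigblock gains a boolean parameter: only mexit passes true, marking a thread that is exiting and will never run Go code again. The implementation lives in signal_unix.go, outside this diff; a hedged sketch of its plausible shape (the sigsetAllExiting mask name is an assumption, not shown in this change):

	func sigblock(exiting bool) {
		if exiting {
			// An exiting thread can install a dedicated mask.
			sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil) // assumed name
			return
		}
		sigprocmask(_SIG_SETMASK, &sigset_all, nil)
	}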
@@ -1363,7 +1430,7 @@ found:
checkdead()
unlock(&sched.lock)
- if GOOS == "darwin" {
+ if GOOS == "darwin" || GOOS == "ios" {
// Make sure pendingPreemptSignals is correct when an M exits.
// For #41702.
if atomic.Load(&m.signalPending) != 0 {
@@ -1371,6 +1438,10 @@ found:
}
}
+ // Destroy all allocated resources. After this is called, we may no
+ // longer take any locks.
+ mdestroy(m)
+
if osStack {
// Return from mstart and let the system thread
// library free the g0 stack and terminate the thread.
@@ -1515,6 +1586,7 @@ func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
if netpollinited() {
netpollBreak()
}
+ sigRecvPrepareForFixup()
_g_ := getg()
if raceenabled {
// For m's running without racectx, we loan out the
@@ -1683,7 +1755,7 @@ func allocm(_p_ *p, fn func(), id int64) *m {
// In the case of cgo, Solaris, illumos, or Darwin, pthread_create will make us a stack.
// Windows and Plan 9 will layout sched stack on OS stack.
- if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "ios" {
+ if iscgo || mStackIsSystemAllocated() {
mp.g0 = malg(-1)
} else {
mp.g0 = malg(8192 * sys.StackGuardMultiplier)
@@ -1754,7 +1826,7 @@ func needm() {
// starting a new m to run Go code via newosproc.
var sigmask sigset
sigsave(&sigmask)
- sigblock()
+ sigblock(false)
// Lock extra list, take head, unlock popped list.
// nilokay=false is safe here because of the invariant above,
@@ -1903,7 +1975,7 @@ func dropm() {
// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
// It's important not to try to handle a signal between those two steps.
sigmask := mp.sigmask
- sigblock()
+ sigblock(false)
unminit()
mnext := lockextra(true)
@@ -2882,7 +2954,9 @@ func wakeNetPoller(when int64) {
} else {
// There are no threads in the network poller, try to get
// one there so it can handle new timers.
- wakep()
+ if GOOS != "plan9" { // Temporary workaround - see issue #42303.
+ wakep()
+ }
}
}
@@ -3774,7 +3848,7 @@ func beforefork() {
// group. See issue #18600.
gp.m.locks++
sigsave(&gp.m.sigmask)
- sigblock()
+ sigblock(false)
// This function is called before fork in the syscall package.
// Code between fork and exec must not allocate memory nor even try to grow the stack.
@@ -3852,7 +3926,7 @@ func syscall_runtime_BeforeExec() {
// On Darwin, wait for all pending preemption signals to
// be received. See issue #41702.
- if GOOS == "darwin" {
+ if GOOS == "darwin" || GOOS == "ios" {
for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
osyield()
}
@@ -4263,7 +4337,7 @@ func badunlockosthread() {
}
func gcount() int32 {
- n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
+ n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
for _, _p_ := range allp {
n -= _p_.gFree.n
}
@@ -4429,7 +4503,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// Normal traceback is impossible or has failed.
// See if it falls into several common cases.
n = 0
- if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "ios" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
+ if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
// Libcall, i.e. runtime syscall on windows.
// Collect Go stack that leads to the call.
n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
@@ -4967,7 +5041,6 @@ func checkdead() {
case _Grunnable,
_Grunning,
_Gsyscall:
- unlock(&allglock)
print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
throw("checkdead: runnable g")
}
@@ -5128,6 +5201,26 @@ func sysmon() {
}
}
mDoFixup()
+ if GOOS == "netbsd" {
+ // netpoll is responsible for waiting for timer
+ // expiration, so we typically don't have to worry
+ // about starting an M to service timers. (Note that the
+ // sleep for timeSleepUntil above simply ensures that sysmon
+ // starts running again when that timer expiration may
+ // cause Go code to run again.)
+ //
+ // However, netbsd has a kernel bug that sometimes
+ // misses netpollBreak wake-ups, which can lead to
+ // unbounded delays servicing timers. If we detect this
+ // overrun, then startm to get something to handle the
+ // timer.
+ //
+ // See issue 42515 and
+ // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
+ if next, _ := timeSleepUntil(); next < now {
+ startm(nil, false)
+ }
+ }
if atomic.Load(&scavenge.sysmonWake) != 0 {
// Kick the scavenger awake if someone requested it.
wakeScavenger()